python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python3
# Copyright 2021-2022 Xiaomi Corp. (authors: Fangjun Kuang,
# Wei Kang,
# Mingshuang Luo)
# Copyright 2023 (authors: Feiteng Li)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
python3 bin/trainer.py \
--decoder-dim 1024 --nhead 16 --num-decoder-layers 12 \
--max-duration 40 --model-name valle \
    --exp-dir exp/valle \
    --dtype "bfloat16"
"""
import argparse
import copy
import logging
import os
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
import random
import warnings
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from icefall.checkpoint import load_checkpoint, remove_checkpoints
from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
from icefall.checkpoint import (
save_checkpoint_with_global_batch_idx,
update_averaged_model,
)
from icefall.dist import cleanup_dist, setup_dist
from icefall.env import get_env_info
from icefall.hooks import register_inf_check_hooks
from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool
from lhotse import CutSet
from lhotse.cut import Cut
from lhotse.dataset.sampling.base import CutSampler
from lhotse.utils import fix_random_seed
from torch import Tensor
from torch.cuda.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from valle.data import TtsDataModule
from valle.models import add_model_arguments, get_model
from valle.modules.optim import Eden, Eve, ScaledAdam
from valle.modules.scheduler import get_scheduler
LRSchedulerType = torch.optim.lr_scheduler._LRScheduler
def set_batch_count(model: Union[nn.Module, DDP], batch_count: float) -> None:
if isinstance(model, DDP):
# get underlying nn.Module
model = model.module
for module in model.modules():
if hasattr(module, "batch_count"):
module.batch_count = batch_count
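# A minimal sketch (not part of the original file) of the contract that
# `set_batch_count` relies on: any submodule exposing a `batch_count`
# attribute has it overwritten. The toy module below is purely illustrative;
# the real modules that track `batch_count` live in the model code.
def _sketch_set_batch_count() -> None:
    class _ToyBlock(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.batch_count = 0.0  # picked up by set_batch_count

    toy = nn.Sequential(_ToyBlock(), nn.Linear(2, 2))
    set_batch_count(toy, 123.0)
    assert toy[0].batch_count == 123.0  # only _ToyBlock was updated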
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--world-size",
type=int,
default=1,
help="Number of GPUs for DDP training.",
)
parser.add_argument(
"--master-port",
type=int,
default=12354,
help="Master port to use for DDP training.",
)
parser.add_argument(
"--tensorboard",
type=str2bool,
default=True,
        help="Whether to log various information to tensorboard.",
)
parser.add_argument(
"--num-epochs",
type=int,
default=20,
help="Number of epochs to train.",
)
parser.add_argument(
"--start-epoch",
type=int,
default=1,
help="""Resume training from this epoch. It should be positive.
If larger than 1, it will load checkpoint from
exp-dir/epoch-{start_epoch-1}.pt
""",
)
parser.add_argument(
"--start-batch",
type=int,
default=0,
help="""If positive, --start-epoch is ignored and
it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt
""",
)
parser.add_argument(
"--exp-dir",
type=str,
default="exp/valle_dev",
help="""The experiment dir.
It specifies the directory where all training related
files, e.g., checkpoints, log, etc, are saved
""",
)
parser.add_argument(
"--optimizer-name",
type=str,
default="ScaledAdam",
help="The optimizer.",
)
parser.add_argument(
"--scheduler-name",
type=str,
default="Eden",
help="The scheduler.",
)
parser.add_argument(
"--base-lr", type=float, default=0.05, help="The base learning rate."
)
parser.add_argument(
"--warmup-steps",
type=int,
default=200,
help="""Number of steps that affects how rapidly the learning rate
decreases. We suggest not to change this.""",
)
parser.add_argument(
"--seed",
type=int,
default=42,
        help="The seed for random generators, intended for reproducibility.",
)
parser.add_argument(
"--inf-check",
type=str2bool,
default=False,
help="Add hooks to check for infinite module outputs and gradients.",
)
parser.add_argument(
"--save-every-n",
type=int,
default=10000,
        help="""Save checkpoint after processing this number of batches
periodically. We save checkpoint to exp-dir/ whenever
params.batch_idx_train % save_every_n == 0. The checkpoint filename
has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'
Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the
        end of each epoch, where `xxx` is the epoch number counting from 1.
""",
)
parser.add_argument(
"--keep-last-k",
type=int,
default=20,
help="""Only keep this number of checkpoints on disk.
For instance, if it is 3, there are only 3 checkpoints
in the exp-dir with filenames `checkpoint-xxx.pt`.
It does not affect checkpoints with name `epoch-xxx.pt`.
""",
)
parser.add_argument(
"--average-period",
type=int,
default=0,
help="""Update the averaged model, namely `model_avg`, after processing
this number of batches. `model_avg` is a separate version of model,
in which each floating-point parameter is the average of all the
parameters from the start of training. Each time we take the average,
we do: `model_avg = model * (average_period / batch_idx_train) +
model_avg * ((batch_idx_train - average_period) / batch_idx_train)`.
""",
)
parser.add_argument(
"--accumulate-grad-steps",
type=int,
default=1,
        help="""Update the model parameters only when batch_idx_train % accumulate_grad_steps == 0.
""",
)
parser.add_argument(
"--dtype",
type=str,
default="float32",
        help="Training dtype: float32, bfloat16, or float16.",
)
parser.add_argument(
"--filter-min-duration",
type=float,
default=0.0,
help="Keep only utterances with duration > this.",
)
parser.add_argument(
"--filter-max-duration",
type=float,
default=20.0,
help="Keep only utterances with duration < this.",
)
parser.add_argument(
"--train-stage",
type=int,
default=0,
        help="""Which modules to train. 0: all modules. For VALL-E, 1: the AR Decoder only, 2: the NAR Decoder(s) only.
""",
)
add_model_arguments(parser)
return parser
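# A minimal numeric sketch (not part of the original file) of the running
# average described in the `--average-period` help above:
#   model_avg = model * (p / n) + model_avg * ((n - p) / n)
# with p = average_period and n = batch_idx_train. The real update is done
# by icefall's `update_averaged_model`; the tensors here are just dummies.
def _sketch_running_average(
    cur: torch.Tensor,
    avg: torch.Tensor,
    average_period: int,
    batch_idx_train: int,
) -> torch.Tensor:
    p, n = float(average_period), float(batch_idx_train)
    return cur * (p / n) + avg * ((n - p) / n)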
def get_params() -> AttributeDict:
"""Return a dict containing training parameters.
All training related parameters that are not passed from the commandline
are saved in the variable `params`.
Commandline options are merged into `params` after they are parsed, so
you can also access them via `params`.
Explanation of options saved in `params`:
- best_train_loss: Best training loss so far. It is used to select
the model that has the lowest training loss. It is
updated during the training.
- best_valid_loss: Best validation loss so far. It is used to select
the model that has the lowest validation loss. It is
updated during the training.
- best_train_epoch: It is the epoch that has the best training loss.
- best_valid_epoch: It is the epoch that has the best validation loss.
        - batch_idx_train: Used to write statistics to tensorboard. It
                           contains the number of batches trained so far
                           across epochs.
        - log_interval: Print training loss if batch_idx % log_interval is 0
- reset_interval: Reset statistics if batch_idx % reset_interval is 0
- valid_interval: Run validation if batch_idx % valid_interval is 0
"""
params = AttributeDict(
{
"best_train_loss": float("inf"),
"best_valid_loss": float("inf"),
"best_train_epoch": -1,
"best_valid_epoch": -1,
"batch_idx_train": 0,
"log_interval": 100, # 10: debug 100: train
"reset_interval": 200,
"valid_interval": 10000,
# parameters for TTS
"env_info": get_env_info(),
}
)
return params
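# A small sketch (not part of the original file) of how command-line options
# are merged into `params`, as described in the get_params() docstring. The
# merge itself happens in run() via `params.update(vars(args))`; the Namespace
# below is a stand-in for the real parsed arguments.
def _sketch_merge_args_into_params() -> AttributeDict:
    params = get_params()
    args = argparse.Namespace(base_lr=0.05, exp_dir="exp/valle_dev")
    params.update(vars(args))
    # Both the CLI options and the defaults from get_params() are now
    # attribute-accessible on `params`.
    assert params.base_lr == 0.05 and params.log_interval == 100
    return params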
def load_checkpoint_if_available(
params: AttributeDict,
model: nn.Module,
model_avg: nn.Module = None,
optimizer: Optional[torch.optim.Optimizer] = None,
scheduler: Optional[LRSchedulerType] = None,
) -> Optional[Dict[str, Any]]:
"""Load checkpoint from file.
If params.start_batch is positive, it will load the checkpoint from
`params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if
    params.start_epoch is larger than 1, it will load the checkpoint from
    `params.exp_dir/epoch-{params.start_epoch - 1}.pt`.
Apart from loading state dict for `model` and `optimizer` it also updates
`best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
and `best_valid_loss` in `params`.
Args:
params:
The return value of :func:`get_params`.
model:
The training model.
model_avg:
The stored model averaged from the start of training.
optimizer:
The optimizer that we are using.
scheduler:
The scheduler that we are using.
Returns:
Return a dict containing previously saved training info.
"""
if params.start_batch > 0:
filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt"
elif params.start_epoch > 1:
filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
else:
return None
assert filename.is_file(), f"{filename} does not exist!"
if isinstance(model, DDP):
raise ValueError("load_checkpoint before DDP")
saved_params = load_checkpoint(
filename,
model=model,
model_avg=model_avg,
optimizer=optimizer,
scheduler=scheduler,
)
saved_stage = saved_params.get("train_stage", 0)
if params.train_stage != saved_stage:
# switch training stage
if params.train_stage and saved_stage: # switch between 1 and 2
params.start_epoch = 1
params.start_batch = 0
else:
# switch between 0 and 1/2
assert params.num_epochs >= params.start_epoch
params.batch_idx_train = saved_params["batch_idx_train"]
for key in ["optimizer", "grad_scaler", "sampler"]:
if key in saved_params:
saved_params.pop(key)
        # when resuming from a stage-0 checkpoint, keep the scheduler
if saved_stage != 0:
for key in ["scheduler"]:
if key in saved_params:
saved_params.pop(key)
best_train_filename = params.exp_dir / "best-train-loss.pt"
if best_train_filename.is_file():
copyfile(
src=best_train_filename,
dst=params.exp_dir / f"best-train-loss-stage{saved_stage}.pt",
)
best_valid_filename = params.exp_dir / "best-valid-loss.pt"
if best_valid_filename.is_file():
copyfile(
src=best_valid_filename,
dst=params.exp_dir / f"best-valid-loss-stage{saved_stage}.pt",
)
else:
keys = [
"best_train_epoch",
"best_valid_epoch",
"batch_idx_train",
"best_train_loss",
"best_valid_loss",
]
for k in keys:
params[k] = saved_params[k]
if params.start_batch > 0:
if "cur_epoch" in saved_params:
params["start_epoch"] = saved_params["cur_epoch"]
return saved_params
def save_checkpoint(
params: AttributeDict,
model: Union[nn.Module, DDP],
model_avg: Optional[nn.Module] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
scheduler: Optional[LRSchedulerType] = None,
sampler: Optional[CutSampler] = None,
scaler: Optional[GradScaler] = None,
rank: int = 0,
) -> None:
"""Save model, optimizer, scheduler and training stats to file.
Args:
params:
It is returned by :func:`get_params`.
model:
The training model.
model_avg:
The stored model averaged from the start of training.
optimizer:
The optimizer used in the training.
sampler:
The sampler for the training dataset.
scaler:
        The scaler used for mixed precision training.
"""
if rank != 0:
return
filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
save_checkpoint_impl(
filename=filename,
model=model,
model_avg=model_avg,
params=params,
optimizer=optimizer,
scheduler=scheduler,
sampler=sampler,
scaler=scaler,
rank=rank,
)
if params.best_train_epoch == params.cur_epoch:
best_train_filename = params.exp_dir / "best-train-loss.pt"
copyfile(src=filename, dst=best_train_filename)
if params.best_valid_epoch == params.cur_epoch:
best_valid_filename = params.exp_dir / "best-valid-loss.pt"
copyfile(src=filename, dst=best_valid_filename)
def compute_loss(
params: AttributeDict,
model: Union[nn.Module, DDP],
batch: dict,
is_training: bool,
) -> Tuple[Any, Tensor, MetricsTracker]:
"""
    Compute the training loss given the model and its inputs.
Args:
params:
Parameters for training. See :func:`get_params`.
model:
        The model for training. It is an instance of a VALL-E family model in our case.
batch:
A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
for the content in it.
is_training:
True for training. False for validation. When it is True, this
function enables autograd during computation; when it is False, it
disables autograd.
"""
device = (
model.device
if isinstance(model, DDP)
else next(model.parameters()).device
)
# at entry, TextTokens is (N, P)
text_tokens = batch["text_tokens"].to(device)
text_tokens_lens = batch["text_tokens_lens"].to(device)
assert text_tokens.ndim == 2
audio_features = batch["audio_features"].to(device)
audio_features_lens = batch["audio_features_lens"].to(device)
assert audio_features.ndim == 3
with torch.set_grad_enabled(is_training):
predicts, loss, metrics = model(
x=text_tokens,
x_lens=text_tokens_lens,
y=audio_features,
y_lens=audio_features_lens,
train_stage=params.train_stage,
)
assert loss.requires_grad == is_training
info = MetricsTracker()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
info["frames"] = (audio_features_lens).sum().item()
# Note: We use reduction=sum while computing the loss.
info["loss"] = loss.detach().cpu().item()
for metric in metrics:
info[metric] = metrics[metric].detach().cpu().item()
del metrics
return predicts, loss, info
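# A small sketch (not part of the original file) of the normalization that
# goes with the reduction="sum" loss above: a per-frame loss is obtained
# later by dividing the accumulated loss by the accumulated frame count,
# mirroring `tot_loss["loss"] / tot_loss["frames"]`. Plain numbers stand in
# for the MetricsTracker used by the real code.
def _sketch_per_frame_loss(summed_losses, frame_counts) -> float:
    total_loss = sum(summed_losses)   # unnormalized, summed over utterances
    total_frames = sum(frame_counts)  # sum of audio_features_lens per batch
    return total_loss / total_frames  # e.g. ([12.0, 8.0], [300, 200]) -> 0.04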
def compute_validation_loss(
params: AttributeDict,
model: Union[nn.Module, DDP],
valid_dl: torch.utils.data.DataLoader,
world_size: int = 1,
) -> MetricsTracker:
"""Run the validation process."""
model.eval()
tot_loss = MetricsTracker()
for batch_idx, batch in enumerate(valid_dl):
predicts, loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
is_training=False,
)
assert loss.requires_grad is False
tot_loss = tot_loss + loss_info
if world_size > 1:
tot_loss.reduce(loss.device)
loss_value = tot_loss["loss"] / tot_loss["frames"]
if loss_value < params.best_valid_loss:
params.best_valid_epoch = params.cur_epoch
params.best_valid_loss = loss_value
if False:
from valle.models import visualize
output_dir = Path(
f"{params.exp_dir}/eval/step-{params.batch_idx_train:06d}"
)
output_dir.mkdir(parents=True, exist_ok=True)
visualize(predicts, batch, output_dir=output_dir)
return tot_loss
def train_one_epoch(
params: AttributeDict,
model: Union[nn.Module, DDP],
optimizer: torch.optim.Optimizer,
scheduler: LRSchedulerType,
train_dl: torch.utils.data.DataLoader,
valid_dl: torch.utils.data.DataLoader,
rng: random.Random,
scaler: GradScaler,
model_avg: Optional[nn.Module] = None,
tb_writer: Optional[SummaryWriter] = None,
world_size: int = 1,
rank: int = 0,
) -> None:
"""Train the model for one epoch.
The training loss from the mean of all frames is saved in
`params.train_loss`. It runs the validation process every
`params.valid_interval` batches.
Args:
params:
It is returned by :func:`get_params`.
model:
The model for training.
optimizer:
The optimizer we are using.
scheduler:
        The learning rate scheduler; we call its step() method on every batch.
train_dl:
Dataloader for the training dataset.
valid_dl:
Dataloader for the validation dataset.
rng:
        A random number generator (not used directly in this function).
scaler:
        The scaler used for mixed precision training.
model_avg:
The stored model averaged from the start of training.
tb_writer:
Writer to write log messages to tensorboard.
world_size:
Number of nodes in DDP training. If it is 1, DDP is disabled.
rank:
The rank of the node in DDP training. If no DDP is used, it should
be set to 0.
"""
model.train()
tot_loss = MetricsTracker()
iter_dl = iter(train_dl)
dtype, enabled = torch.float32, False
if params.dtype in ["bfloat16", "bf16"]:
dtype, enabled = torch.bfloat16, True
elif params.dtype in ["float16", "fp16"]:
dtype, enabled = torch.float16, True
def evaluate():
logging.info("Computing validation loss")
with torch.cuda.amp.autocast(dtype=dtype):
valid_info = compute_validation_loss(
params=params,
model=model,
valid_dl=valid_dl,
world_size=world_size,
)
model.train()
logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}")
logging.info(
f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB"
)
if tb_writer is not None:
valid_info.write_summary(
tb_writer, "train/valid_", params.batch_idx_train
)
batch_idx = 0
while True:
try:
batch = next(iter_dl)
except StopIteration:
logging.info("Reaches end of dataloader.")
if params.batch_idx_train % params.accumulate_grad_steps:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
break
batch_idx += 1
params.batch_idx_train += 1
batch_size = len(batch["text"])
try:
with torch.cuda.amp.autocast(dtype=dtype, enabled=enabled):
_, loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
is_training=True,
)
# summary stats
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
            # NOTE: We use reduction=="sum"; the loss is summed over the
            # utterances in the batch and is not normalized so far.
scaler.scale(loss).backward()
if params.batch_idx_train >= params.accumulate_grad_steps:
if params.batch_idx_train % params.accumulate_grad_steps == 0:
if params.optimizer_name not in ["ScaledAdam", "Eve"]:
# Unscales the gradients of optimizer's assigned params in-place
scaler.unscale_(optimizer)
# Since the gradients of optimizer's assigned params are unscaled, clips as usual:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
for k in range(params.accumulate_grad_steps):
if isinstance(scheduler, Eden):
scheduler.step_batch(params.batch_idx_train)
else:
scheduler.step()
set_batch_count(model, params.batch_idx_train)
except: # noqa
display_and_save_batch(batch, params=params)
raise
if params.average_period > 0:
if (
rank == 0
and params.batch_idx_train > 0
and params.batch_idx_train % params.average_period == 0
):
update_averaged_model(
params=params,
model_cur=model,
model_avg=model_avg,
)
if (
params.batch_idx_train > 0
and params.batch_idx_train % params.save_every_n == 0
):
save_checkpoint_with_global_batch_idx(
out_dir=params.exp_dir,
global_batch_idx=params.batch_idx_train,
model=model,
model_avg=model_avg,
params=params,
optimizer=optimizer,
scheduler=scheduler,
sampler=train_dl.sampler,
scaler=scaler,
rank=rank,
)
remove_checkpoints(
out_dir=params.exp_dir,
topk=params.keep_last_k,
rank=rank,
)
if batch_idx % 100 == 0 and params.dtype in ["float16", "fp16"]:
# If the grad scale was less than 1, try increasing it. The _growth_interval
# of the grad scaler is configurable, but we can't configure it to have different
# behavior depending on the current grad scale.
cur_grad_scale = scaler._scale.item()
if cur_grad_scale < 1.0 or (
cur_grad_scale < 8.0 and batch_idx % 400 == 0
):
scaler.update(cur_grad_scale * 2.0)
if cur_grad_scale < 0.01:
logging.warning(f"Grad scale is small: {cur_grad_scale}")
if cur_grad_scale < 1.0e-05:
raise RuntimeError(
f"grad_scale is too small, exiting: {cur_grad_scale}"
)
if batch_idx % params.log_interval == 0:
cur_lr = scheduler.get_last_lr()[0]
cur_grad_scale = (
scaler._scale.item()
if params.dtype in ["float16", "fp16"]
else 1.0
)
logging.info(
f"Epoch {params.cur_epoch}, "
f"batch {batch_idx}, train_loss[{loss_info}], "
f"tot_loss[{tot_loss}], "
f"batch size: {batch_size}, "
f"lr: {cur_lr:.2e}"
+ (
f", grad_scale: {cur_grad_scale}"
if params.dtype in ["float16", "fp16"]
else ""
)
)
if tb_writer is not None:
tb_writer.add_scalar(
"train/learning_rate", cur_lr, params.batch_idx_train
)
loss_info.write_summary(
tb_writer,
"train/current_",
params.batch_idx_train,
)
tot_loss.write_summary(
tb_writer, "train/tot_", params.batch_idx_train
)
if params.dtype in ["float16", "fp16"]:
tb_writer.add_scalar(
"train/grad_scale",
cur_grad_scale,
params.batch_idx_train,
)
if params.batch_idx_train % params.valid_interval == 0:
evaluate()
if True: # eval every epoch
evaluate()
loss_value = tot_loss["loss"] / tot_loss["frames"]
params.train_loss = loss_value
if params.train_loss < params.best_train_loss:
params.best_train_epoch = params.cur_epoch
params.best_train_loss = params.train_loss
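# A condensed sketch (not part of the original file) of the gradient
# accumulation pattern used in train_one_epoch above: call backward() on
# every batch, but run scaler.step / scaler.update / optimizer.zero_grad only
# every `accumulate_grad_steps` batches. The toy model, data and optimizer
# are dummies; the real loop also handles AMP dtypes, clipping and schedulers.
def _sketch_grad_accumulation(accumulate_grad_steps: int = 4) -> None:
    toy_model = nn.Linear(8, 1)
    toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
    toy_scaler = GradScaler(enabled=False)  # enable only for fp16 training
    for batch_idx_train in range(1, 13):
        loss = toy_model(torch.randn(2, 8)).pow(2).mean()
        toy_scaler.scale(loss).backward()
        if batch_idx_train % accumulate_grad_steps == 0:
            toy_scaler.step(toy_optimizer)
            toy_scaler.update()
            toy_optimizer.zero_grad()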
def filter_short_and_long_utterances(
cuts: CutSet, min_duration: float, max_duration: float
) -> CutSet:
def remove_short_and_long_utt(c: Cut):
        # Keep only utterances with duration in [min_duration, max_duration] seconds
if c.duration < min_duration or c.duration > max_duration:
# logging.warning(
# f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
# )
return False
return True
cuts = cuts.filter(remove_short_and_long_utt)
return cuts
def run(rank, world_size, args):
"""
Args:
rank:
It is a value between 0 and `world_size-1`, which is
passed automatically by `mp.spawn()` in :func:`main`.
The node with rank 0 is responsible for saving checkpoint.
world_size:
Number of GPUs for DDP training.
args:
The return value of get_parser().parse_args()
"""
params = get_params()
params.update(vars(args))
fix_random_seed(params.seed)
rng = random.Random(params.seed)
if world_size > 1:
setup_dist(rank, world_size, params.master_port)
setup_logger(f"{params.exp_dir}/log/log-train")
logging.info("Training started")
if args.tensorboard and rank == 0:
if params.train_stage:
tb_writer = SummaryWriter(
log_dir=f"{params.exp_dir}/tensorboard_stage{params.train_stage}"
)
else:
tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
else:
tb_writer = None
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda", rank)
# https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
logging.info(f"Device: {device}")
logging.info(params)
logging.info("About to create model")
model = get_model(params)
num_param = sum([p.numel() for p in model.parameters()])
logging.info(f"Number of model parameters: {num_param}")
assert params.save_every_n >= params.average_period
model_avg: Optional[nn.Module] = None
if rank == 0 and params.average_period > 0:
# model_avg is only used with rank 0
model_avg = copy.deepcopy(model).to(torch.float64)
assert params.start_epoch > 0, params.start_epoch
checkpoints = load_checkpoint_if_available(
params=params, model=model, model_avg=model_avg
)
model.to(device)
if world_size > 1:
logging.info("Using DDP")
model = DDP(model, device_ids=[rank], find_unused_parameters=True)
if params.train_stage:
_model = model.module if isinstance(model, DDP) else model
model_parameters = _model.stage_parameters(params.train_stage)
else:
model_parameters = model.parameters()
if params.optimizer_name == "ScaledAdam":
parameters_names = []
if params.train_stage: # != 0
_model = model.module if isinstance(model, DDP) else model
parameters_names.append(
[
name_param_pair[0]
for name_param_pair in _model.stage_named_parameters(
params.train_stage
)
]
)
else:
parameters_names.append(
[
name_param_pair[0]
for name_param_pair in model.named_parameters()
]
)
optimizer = ScaledAdam(
model_parameters,
lr=params.base_lr,
betas=(0.9, 0.95),
clipping_scale=2.0,
parameters_names=parameters_names,
show_dominant_parameters=False,
clipping_update_period=1000,
)
elif params.optimizer_name == "Eve":
optimizer = Eve(
model_parameters,
lr=params.base_lr,
betas=(0.9, 0.98),
)
elif params.optimizer_name == "AdamW":
optimizer = torch.optim.AdamW(
model_parameters,
lr=params.base_lr,
betas=(0.9, 0.95),
weight_decay=1e-2,
eps=1e-8,
)
elif params.optimizer_name == "Adam":
optimizer = torch.optim.Adam(
model_parameters,
lr=params.base_lr,
betas=(0.9, 0.95),
eps=1e-8,
)
else:
raise NotImplementedError()
scheduler = get_scheduler(params, optimizer)
optimizer.zero_grad()
if checkpoints and "optimizer" in checkpoints:
logging.info("Loading optimizer state dict")
optimizer.load_state_dict(checkpoints["optimizer"])
if (
checkpoints
and "scheduler" in checkpoints
and checkpoints["scheduler"] is not None
):
logging.info("Loading scheduler state dict")
scheduler.load_state_dict(checkpoints["scheduler"])
if params.inf_check:
register_inf_check_hooks(model)
if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
sampler_state_dict = checkpoints["sampler"]
else:
sampler_state_dict = None
dataset = TtsDataModule(args)
train_cuts = dataset.train_cuts()
valid_cuts = dataset.dev_cuts()
train_cuts = filter_short_and_long_utterances(
train_cuts, params.filter_min_duration, params.filter_max_duration
)
valid_cuts = filter_short_and_long_utterances(
valid_cuts, params.filter_min_duration, params.filter_max_duration
)
train_dl = dataset.train_dataloaders(
train_cuts, sampler_state_dict=sampler_state_dict
)
valid_dl = dataset.valid_dataloaders(valid_cuts)
if True:
scan_pessimistic_batches_for_oom(
model=model,
train_dl=train_dl,
optimizer=optimizer,
params=params,
)
scaler = GradScaler(
enabled=(params.dtype in ["fp16", "float16"]), init_scale=1.0
)
if checkpoints and "grad_scaler" in checkpoints:
logging.info("Loading grad scaler state dict")
scaler.load_state_dict(checkpoints["grad_scaler"])
for epoch in range(params.start_epoch, params.num_epochs + 1):
if isinstance(scheduler, Eden):
scheduler.step_epoch(epoch - 1)
fix_random_seed(params.seed + epoch - 1)
train_dl.sampler.set_epoch(epoch - 1)
if tb_writer is not None:
tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
params.cur_epoch = epoch
train_one_epoch(
params=params,
model=model,
model_avg=model_avg,
optimizer=optimizer,
scheduler=scheduler,
train_dl=train_dl,
valid_dl=valid_dl,
rng=rng,
scaler=scaler,
tb_writer=tb_writer,
world_size=world_size,
rank=rank,
)
save_checkpoint(
params=params,
model=model,
model_avg=model_avg,
optimizer=optimizer,
scheduler=scheduler,
sampler=train_dl.sampler,
scaler=scaler,
rank=rank,
)
logging.info("Done!")
if world_size > 1:
torch.distributed.barrier()
cleanup_dist()
def display_and_save_batch(
batch: dict,
params: AttributeDict,
) -> None:
"""Display the batch statistics and save the batch into disk.
Args:
batch:
A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
for the content in it.
params:
Parameters for training. See :func:`get_params`.
"""
from lhotse.utils import uuid4
filename = f"{params.exp_dir}/batch-{uuid4()}.pt"
logging.info(f"Saving batch to {filename}")
torch.save(batch, filename)
def scan_pessimistic_batches_for_oom(
model: Union[nn.Module, DDP],
train_dl: torch.utils.data.DataLoader,
optimizer: torch.optim.Optimizer,
params: AttributeDict,
):
from lhotse.dataset import find_pessimistic_batches
logging.info(
"Sanity check -- see if any of the batches in epoch 1 would cause OOM."
)
batches, crit_values = find_pessimistic_batches(train_dl.sampler)
dtype = torch.float32
if params.dtype in ["bfloat16", "bf16"]:
dtype = torch.bfloat16
elif params.dtype in ["float16", "fp16"]:
dtype = torch.float16
for criterion, cuts in batches.items():
batch = train_dl.dataset[cuts]
try:
with torch.cuda.amp.autocast(dtype=dtype):
_, loss, _ = compute_loss(
params=params,
model=model,
batch=batch,
is_training=True,
)
loss.backward()
optimizer.zero_grad()
except Exception as e:
if "CUDA out of memory" in str(e):
logging.error(
"Your GPU ran out of memory with the current "
"max_duration setting. We recommend decreasing "
"max_duration and trying again.\n"
f"Failing criterion: {criterion} "
f"(={crit_values[criterion]}) ..."
)
display_and_save_batch(batch, params=params)
raise
logging.info(
f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB"
)
def main():
parser = get_parser()
TtsDataModule.add_arguments(parser)
args = parser.parse_args()
args.exp_dir = Path(args.exp_dir)
world_size = args.world_size
assert world_size >= 1
if world_size > 1:
mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
else:
run(rank=0, world_size=1, args=args)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main/valle/bin/trainer.py |
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from icefall.utils import AttributeDict
from torchmetrics.classification import MulticlassAccuracy
from valle.data.input_strategies import PromptedFeatures
from valle.models import NUM_MEL_BINS, get_model
class TestModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.devices = [torch.device("cpu")]
if torch.cuda.is_available():
cls.devices.append(torch.device("cuda", 0))
if torch.cuda.device_count() > 1:
torch.cuda.set_device(1)
cls.devices.append(torch.device("cuda", 1))
def test_vallf(self):
params = AttributeDict()
params.decoder_dim = 64
params.nhead = 16
params.num_decoder_layers = 4
x = torch.from_numpy(np.random.randint(0, 100, size=[4, 8]))
x_lens = torch.from_numpy(np.random.randint(4, 8, size=[4]))
x_lens[-1] = 8
enroll_x_lens = torch.from_numpy(np.random.randint(1, 3, size=[4]))
y = torch.from_numpy(np.random.randint(0, 1000, size=[4, 16, 8]))
y_lens = torch.from_numpy(np.random.randint(8, 16, size=[4]))
y_lens[-1] = 16
params.norm_first = True
params.add_prenet = False
params.model_name = "VALL-F"
params.share_embedding = True
params.scale_factor = 1.0
for device in self.devices:
for mode in [0, 1, 2]:
params.prefix_mode = mode
# VALL-E
model = get_model(params)
# VALL-F
model.to(device)
x = x.to(device)
x_lens = x_lens.to(device)
y = y.to(device)
y_lens = y_lens.to(device)
# Training
for train_stage in [0, 1, 2]:
codes, loss, metrics = model(
x, x_lens, y, y_lens, train_stage=train_stage
)
# Inference
model.eval()
codes = model.inference(
x[-1:],
x_lens[-1:],
y[-1:],
enroll_x_lens=enroll_x_lens[-1:],
)
def test_valle(self):
params = AttributeDict()
params.decoder_dim = 64
params.nhead = 16
params.num_decoder_layers = 4
x = torch.from_numpy(np.random.randint(0, 100, size=[4, 8]))
x_lens = torch.from_numpy(np.random.randint(4, 8, size=[4]))
x_lens[-1] = 8
enroll_x_lens = torch.from_numpy(np.random.randint(1, 3, size=[4]))
y = torch.from_numpy(np.random.randint(0, 1000, size=[4, 16, 8]))
y_lens = torch.from_numpy(np.random.randint(8, 16, size=[4]))
y_lens[-1] = 16
params.norm_first = False
params.add_prenet = True
params.model_name = "VALL-E"
params.share_embedding = True
params.scale_factor = 1.0
for device in self.devices:
for mode in [0, 1, 2]:
params.prefix_mode = mode
# VALL-E
model = get_model(params)
model.to(device)
x = x.to(device)
x_lens = x_lens.to(device)
y = y.to(device)
y_lens = y_lens.to(device)
# Training
codes, loss, metrics = model(x, x_lens, y, y_lens)
# Inference
model.eval()
codes = model.inference(
x[-1:], x_lens[-1:], y[-1:], enroll_x_lens=enroll_x_lens
)
params.scale_factor = 0.5
def test_vallef_prefix4(self):
params = AttributeDict()
params.decoder_dim = 64
params.nhead = 16
params.num_decoder_layers = 4
x = torch.from_numpy(np.random.randint(0, 100, size=[4, 8]))
x_lens = torch.from_numpy(np.random.randint(4, 8, size=[4]))
x_lens[-1] = 8
enroll_x_lens = torch.from_numpy(np.random.randint(1, 3, size=[4]))
y = torch.from_numpy(np.random.randint(0, 1000, size=[4, 16, 8]))
y_lens = torch.from_numpy(np.random.randint(8, 16, size=[4]))
y_lens[-1] = 16
prompts = torch.from_numpy(np.random.randint(0, 1000, size=[4, 12, 8]))
prompts_lens = torch.from_numpy(np.random.randint(12, 13, size=[4]))
params.norm_first = False
params.add_prenet = True
params.share_embedding = False
params.scale_factor = 1.0
for device in self.devices:
for model_name in ["VALL-E", "VALL-F"]:
for mode in [4]:
params.prefix_mode = mode
params.model_name = model_name
# VALL-E
model = get_model(params)
model.to(device)
x = x.to(device)
x_lens = x_lens.to(device)
y = y.to(device)
_y = PromptedFeatures(prompts, y).to(device)
_y_lens = PromptedFeatures(prompts_lens, y_lens).to(device)
# Training
codes, loss, metrics = model(x, x_lens, _y, _y_lens)
# Inference
model.eval()
codes = model.inference(
x[-1:], x_lens[-1:], y[-1:], enroll_x_lens=enroll_x_lens
)
def test_topmetric(self):
metric_top10 = MulticlassAccuracy(1024, top_k=10, average="micro")
metric_top1 = MulticlassAccuracy(1024, top_k=1, average="micro")
batch_size, seq_len = 4, 16
targets = np.random.randint(0, 1000, size=[batch_size, seq_len])
logits = np.random.random([batch_size, 1024, seq_len]).astype(
np.float32
)
larger_logits = np.clip(logits, -1.0, 1.0)
smaller_logits = np.clip(logits, -1.0, 1.0)
for b in range(batch_size):
for t in range(seq_len):
assert targets[b, t] >= 0
larger_logits[b, targets[b, t], t] = 2.0
smaller_logits[b, targets[b, t], t] = -2.0
targets = torch.from_numpy(targets)
larger_logits = torch.from_numpy(larger_logits)
smaller_logits = torch.from_numpy(smaller_logits)
for device in self.devices:
metric_top10.to(device)
metric_top1.to(device)
targets = targets.to(device)
one = metric_top10(larger_logits.to(device), targets)
assert one.cpu().item() == 1.0, one.cpu().item()
zero = metric_top1(smaller_logits.to(device), targets)
assert zero.cpu().item() == 0.0, zero.cpu().item()
half = metric_top1(
torch.concat(
[smaller_logits.to(device), larger_logits.to(device)], dim=2
),
torch.concat([targets, targets], dim=1),
)
assert half.cpu().item() == 0.5, half.cpu().item()
def test_transformer(self):
params = AttributeDict()
params.decoder_dim = 64
params.nhead = 4
params.num_decoder_layers = 4
x = torch.from_numpy(np.random.randint(0, 100, size=[4, 8]))
x_lens = torch.from_numpy(np.random.randint(4, 8, size=[4]))
x_lens[-1] = 8
y = torch.from_numpy(
np.random.random((4, 16, NUM_MEL_BINS)).astype(np.float32)
)
y_lens = torch.from_numpy(np.random.randint(8, 16, size=[4]))
y_lens[-1] = 16
params.model_name = "Transformer"
params.norm_first = False
params.add_prenet = True
for device in self.devices:
# Transformer
model = get_model(params)
num_param = sum([p.numel() for p in model.parameters()])
model.to(device)
x = x.to(device)
x_lens = x_lens.to(device)
y = y.to(device)
y_lens = y_lens.to(device)
# Training
codes, loss, metrics = model(x, x_lens, y, y_lens)
# Inference
model.eval()
codes = model.inference(x[-1:], x_lens[-1:])
params.add_prenet = False
if __name__ == "__main__":
unittest.main()
| EXA-1-master | exa/models/valle/vall-e-main/valle/tests/model_test.py |
# Copyright 2023 (authors: Zhao Ming)
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from valle.data import TextTokenizer
class TestTextTokenizer(unittest.TestCase):
def test_espeak(self):
text_tokenizer = TextTokenizer(backend="espeak")
for (_input, _target) in [
("The two parties, the sheep and the wolves, met each other.",
['ð', 'ə', '_', 't', 'uː', '_', 'p', 'ɑːɹ', 'ɾ', 'i', 'z', ',', '_', 'ð']),
("Mother! dear father! do you hear me?",
['m', 'ʌ', 'ð', 'ɚ', '!', '_', 'd', 'ɪɹ', '_', 'f', 'ɑː', 'ð', 'ɚ', '!']),
("\"Whoever thou art,\" She exclaimed, suddenly seizing Rodolfo's hand,",
['"', 'h', 'uː', 'ɛ', 'v', 'ɚ', '_', 'ð', 'aʊ', '_', 'ɑːɹ', 't', ',', '"', '_', 'ʃ', 'iː',
'_', 'ɛ', 'k', 's', 'k', 'l', 'eɪ', 'm', 'd', ',', '_', 's', 'ʌ', 'd', 'ə', 'n', 'l', 'i',
'_', 's', 'iː', 'z', 'ɪ', 'ŋ', '_', 'ɹ', 'ə', 'd', 'ɑː', 'l', 'f', 'oʊ', 'z', '_', 'h',
'æ', 'n', 'd', ','])
]:
phonemized = text_tokenizer(_input)
self.assertEqual(phonemized[0][:len(_target)], _target)
def test_pypinyin(self):
text_tokenizer = TextTokenizer(backend="pypinyin")
for (_input, _target) in [
("你好这是测试",
["ni3", '-', "hao3", '-', "zhe4", '-', "shi4", '-', "ce4", '-', "shi4"]),
("\"你好\", 这是测试.",
["\"", "ni3", '-', "hao3", "\"", ",", '_', "zhe4", '-', "shi4", '-', "ce4", '-', "shi4", "."]),
("此项 工作 还能 怎么 改进",
['ci3', '-', 'xiang4', '_', 'gong1', '-', 'zuo4', '_',
'hai2', '-', 'neng2', '_', 'zen3', '-', 'me5', '_', 'gai3', '-', 'jin4']), # AISHELL
]:
phonemized = text_tokenizer(_input)
self.assertEqual(phonemized[0], _target)
def test_pypinyin_initials_finals(self):
text_tokenizer = TextTokenizer(backend="pypinyin_initials_finals")
for (_input, _target) in [
("你好这是测试",
["n", "i3", "-", "h", "ao3", "-", "zh", "e4", "-", "sh", "i4", "-", "c", "e4", "-", "sh", "i4"],
),
("\"你好.这是测试.",
["\"", "n", "i3", "-", "h", "ao3", ".", "zh", "e4", "-", "sh", "i4", "-", "c", "e4", "-", "sh", "i4", "."],
),
("\"你好. 这是测试.",
["\"", "n", "i3", "-", "h", "ao3", ".", "_", "zh", "e4", "-", "sh", "i4", "-", "c", "e4", "-", "sh", "i4", "."],
),
("此项 工作 还能 怎么 改进", ['c', 'i3', '-', 'x', 'iang4', '_', 'g', 'ong1', '-', 'z', 'uo4', '_',
'h', 'ai2', '-', 'n', 'eng2', '_', 'z', 'en3', '-', 'm', 'e5', '_',
'g', 'ai3', '-', 'j', 'in4']), # AISHELL
]:
phonemized = text_tokenizer(_input)
self.assertListEqual(phonemized[0], _target)
if __name__ == "__main__":
unittest.main()
| EXA-1-master | exa/models/valle/vall-e-main/valle/tests/data/tokenizer_test.py |
# Copyright 2020 Mobvoi Inc. (authors: Fangjun Kuang)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import TypeVar
from typing import Union
Symbol = TypeVar('Symbol')
# Disable __repr__ otherwise it could freeze e.g. Jupyter.
@dataclass(repr=False)
class SymbolTable(Generic[Symbol]):
'''SymbolTable that maps symbol IDs, found on the FSA arcs to
actual objects. These objects can be arbitrary Python objects
that can serve as keys in a dictionary (i.e. they need to be
hashable and immutable).
    The SymbolTable can only be written to/read from disk if the
    symbols are strings.
'''
_id2sym: Dict[int, Symbol] = field(default_factory=dict)
'''Map an integer to a symbol.
'''
_sym2id: Dict[Symbol, int] = field(default_factory=dict)
'''Map a symbol to an integer.
'''
_next_available_id: int = 1
'''A helper internal field that helps adding new symbols
to the table efficiently.
'''
eps: Symbol = '<eps>'
'''Null symbol, always mapped to index 0.
'''
def __post_init__(self):
for idx, sym in self._id2sym.items():
assert self._sym2id[sym] == idx
assert idx >= 0
for sym, idx in self._sym2id.items():
assert idx >= 0
assert self._id2sym[idx] == sym
if 0 not in self._id2sym:
self._id2sym[0] = self.eps
self._sym2id[self.eps] = 0
else:
assert self._id2sym[0] == self.eps
assert self._sym2id[self.eps] == 0
self._next_available_id = max(self._id2sym) + 1
@staticmethod
def from_str(s: str) -> 'SymbolTable':
'''Build a symbol table from a string.
The string consists of lines. Every line has two fields separated
by space(s), tab(s) or both. The first field is the symbol and the
second the integer id of the symbol.
Args:
s:
The input string with the format described above.
Returns:
An instance of :class:`SymbolTable`.
'''
id2sym: Dict[int, str] = dict()
sym2id: Dict[str, int] = dict()
for line in s.split('\n'):
fields = line.split()
if len(fields) == 0:
continue # skip empty lines
assert len(fields) == 2, \
f'Expect a line with 2 fields. Given: {len(fields)}'
sym, idx = fields[0], int(fields[1])
assert sym not in sym2id, f'Duplicated symbol {sym}'
assert idx not in id2sym, f'Duplicated id {idx}'
id2sym[idx] = sym
sym2id[sym] = idx
eps = id2sym.get(0, '<eps>')
return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=eps)
@staticmethod
def from_file(filename: str) -> 'SymbolTable':
'''Build a symbol table from file.
Every line in the symbol table file has two fields separated by
space(s), tab(s) or both. The following is an example file:
.. code-block::
<eps> 0
a 1
b 2
c 3
Args:
filename:
Name of the symbol table file. Its format is documented above.
Returns:
An instance of :class:`SymbolTable`.
'''
with open(filename, 'r', encoding='utf-8') as f:
return SymbolTable.from_str(f.read().strip())
def to_str(self) -> str:
'''
Returns:
Return a string representation of this object. You can pass
it to the method ``from_str`` to recreate an identical object.
'''
s = ''
for idx, symbol in sorted(self._id2sym.items()):
s += f'{symbol} {idx}\n'
return s
def to_file(self, filename: str):
'''Serialize the SymbolTable to a file.
Every line in the symbol table file has two fields separated by
space(s), tab(s) or both. The following is an example file:
.. code-block::
<eps> 0
a 1
b 2
c 3
Args:
filename:
Name of the symbol table file. Its format is documented above.
'''
with open(filename, 'w') as f:
for idx, symbol in sorted(self._id2sym.items()):
print(symbol, idx, file=f)
def add(self, symbol: Symbol, index: Optional[int] = None) -> int:
'''Add a new symbol to the SymbolTable.
Args:
symbol:
The symbol to be added.
index:
Optional int id to which the symbol should be assigned.
If it is not available, a ValueError will be raised.
Returns:
The int id to which the symbol has been assigned.
'''
# Already in the table? Return its ID.
if symbol in self._sym2id:
return self._sym2id[symbol]
# Specific ID not provided - use next available.
if index is None:
index = self._next_available_id
# Specific ID provided but not available.
if index in self._id2sym:
raise ValueError(f"Cannot assign id '{index}' to '{symbol}' - "
f"already occupied by {self._id2sym[index]}")
self._sym2id[symbol] = index
self._id2sym[index] = symbol
# Update next available ID if needed
if self._next_available_id <= index:
self._next_available_id = index + 1
return index
def get(self, k: Union[int, Symbol]) -> Union[Symbol, int]:
'''Get a symbol for an id or get an id for a symbol
Args:
k:
If it is an id, it tries to find the symbol corresponding
to the id; if it is a symbol, it tries to find the id
corresponding to the symbol.
Returns:
An id or a symbol depending on the given `k`.
'''
if isinstance(k, int):
return self._id2sym[k]
else:
return self._sym2id[k]
def merge(self, other: 'SymbolTable') -> 'SymbolTable':
'''Create a union of two SymbolTables.
Raises an AssertionError if the same IDs are occupied by
different symbols.
Args:
other:
A symbol table to merge with ``self``.
Returns:
A new symbol table.
'''
self._check_compatible(other)
id2sym = {**self._id2sym, **other._id2sym}
sym2id = {**self._sym2id, **other._sym2id}
return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=self.eps)
def _check_compatible(self, other: 'SymbolTable') -> None:
# Epsilon compatibility
assert self.eps == other.eps, f'Mismatched epsilon symbol: ' \
f'{self.eps} != {other.eps}'
# IDs compatibility
common_ids = set(self._id2sym).intersection(other._id2sym)
for idx in common_ids:
assert self[idx] == other[idx], f'ID conflict for id: {idx}, ' \
f'self[idx] = "{self[idx]}", ' \
f'other[idx] = "{other[idx]}"'
# Symbols compatibility
common_symbols = set(self._sym2id).intersection(other._sym2id)
for sym in common_symbols:
            assert self[sym] == other[sym], f'ID conflict for symbol: {sym}, ' \
f'self[sym] = "{self[sym]}", ' \
f'other[sym] = "{other[sym]}"'
def __getitem__(self, item: Union[int, Symbol]) -> Union[Symbol, int]:
return self.get(item)
def __contains__(self, item: Union[int, Symbol]) -> bool:
if isinstance(item, int):
return item in self._id2sym
else:
return item in self._sym2id
def __len__(self) -> int:
return len(self._id2sym)
def __eq__(self, other: 'SymbolTable') -> bool:
if len(self) != len(other):
return False
for s in self.symbols:
if self[s] != other[s]:
return False
return True
@property
def ids(self) -> List[int]:
'''Returns a list of integer IDs corresponding to the symbols.
'''
ans = list(self._id2sym.keys())
ans.sort()
return ans
@property
def symbols(self) -> List[Symbol]:
'''Returns a list of symbols (e.g., strings) corresponding to
the integer IDs.
'''
ans = list(self._sym2id.keys())
ans.sort()
return ans
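# A small usage sketch (not part of the original file) of the API documented
# above; the symbols and ids below are made up for illustration.
def _sketch_symbol_table_usage() -> 'SymbolTable':
    table = SymbolTable.from_str('a 1\nb 2')
    assert table['a'] == 1 and table[2] == 'b' and table[0] == '<eps>'
    table.add('c')               # auto-assigns the next free id, here 3
    table.add('d', index=10)     # explicit id
    other = SymbolTable.from_str('a 1\ne 5')
    merged = table.merge(other)  # union; conflicting ids would raise
    assert 'e' in merged and len(merged) == 6
    return merged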
| EXA-1-master | exa/models/valle/vall-e-main/valle/utils/symbol_table.py |
from .symbol_table import SymbolTable
| EXA-1-master | exa/models/valle/vall-e-main/valle/utils/__init__.py |
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Iterator, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from icefall.utils import make_pad_mask
from torchmetrics.classification import MulticlassAccuracy
from valle.data.input_strategies import PromptedFeatures
from valle.modules.embedding import SinePositionalEmbedding, TokenEmbedding
from valle.modules.transformer import (
AdaptiveLayerNorm,
LayerNorm,
TransformerDecoderLayer,
TransformerEncoder,
TransformerEncoderLayer,
)
NUM_TEXT_TOKENS = 512
NUM_AUDIO_TOKENS = 1024 # EnCodec RVQ bins
NUM_MEL_BINS = 100 # BigVGAN bigvgan_24khz_100band
class Transpose(nn.Identity):
"""(N, T, D) -> (N, D, T)"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input.transpose(1, 2)
# NOTE: There are two ways to implement the model
# 1) [VALL-F] standard TransformerDecoder, use x as memory
# 2) [VALL-E] modified TransformerDecoder like GPT-x (i.e. a causal TransformerEncoder),
#    using x as the prefix of the decoder inputs (see the mask sketch below)
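# A minimal sketch (not part of the original file) of the causal attention
# mask shared by both variants described in the NOTE above; it mirrors the
# torch.triu(..., diagonal=1) construction used inside VALLF.forward below.
# True entries mark positions that attention is NOT allowed to attend to,
# e.g. _sketch_causal_mask(3) -> [[F, T, T], [F, F, T], [F, F, F]].
def _sketch_causal_mask(
    y_len: int, device: torch.device = torch.device("cpu")
) -> torch.Tensor:
    return torch.triu(
        torch.ones(y_len, y_len, device=device, dtype=torch.bool), diagonal=1
    )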
class VALLF(nn.Module):
"""It implements https://arxiv.org/abs/2301.02111
"Neural Codec Language Models are Zero-Shot Text to Speech Synthesizers"
"""
def __init__(
self,
d_model: int,
nhead: int,
num_layers: int,
norm_first: bool = True,
add_prenet: bool = False,
decoder_cls: Union[
nn.TransformerDecoder, nn.TransformerEncoder
] = nn.TransformerDecoder,
decoder_layer_cls: Union[
TransformerDecoderLayer, TransformerEncoderLayer
] = TransformerDecoderLayer,
prefix_mode: int = 0,
share_embedding: bool = True,
nar_scale_factor: float = 1.0,
):
"""
Args:
d_model:
The number of expected features in the input (required).
nhead:
The number of heads in the multiheadattention models (required).
num_layers:
The number of sub-decoder-layers in the decoder (required).
"""
super().__init__()
nar_d_model = int(d_model * nar_scale_factor)
self.ar_text_embedding = TokenEmbedding(d_model, NUM_TEXT_TOKENS) # W_x
self.nar_text_embedding = TokenEmbedding(nar_d_model, NUM_TEXT_TOKENS)
self.ar_audio_embedding = TokenEmbedding(d_model, NUM_AUDIO_TOKENS + 1)
self.nar_audio_embeddings = nn.ModuleList(
[TokenEmbedding(nar_d_model, NUM_AUDIO_TOKENS + 1)]
+ [TokenEmbedding(nar_d_model, NUM_AUDIO_TOKENS) for i in range(7)]
) # W_a
# PreNet
if add_prenet:
self.ar_text_prenet = nn.Sequential(
Transpose(),
nn.Conv1d(d_model, d_model, kernel_size=5, padding="same"),
nn.BatchNorm1d(d_model),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv1d(d_model, d_model, kernel_size=5, padding="same"),
nn.BatchNorm1d(d_model),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv1d(d_model, d_model, kernel_size=5, padding="same"),
nn.BatchNorm1d(d_model),
nn.ReLU(),
nn.Dropout(0.5),
Transpose(),
nn.Linear(d_model, d_model),
)
self.ar_audio_prenet = nn.Sequential(
nn.Linear(d_model, 256),
nn.ReLU(),
nn.Dropout(0.25),
nn.Linear(256, 256),
nn.ReLU(),
nn.Dropout(0.25),
nn.Linear(256, d_model),
)
self.nar_text_prenet = nn.Sequential(
Transpose(),
nn.Conv1d(
nar_d_model, nar_d_model, kernel_size=5, padding="same"
),
nn.BatchNorm1d(nar_d_model),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv1d(
nar_d_model, nar_d_model, kernel_size=5, padding="same"
),
nn.BatchNorm1d(nar_d_model),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv1d(
nar_d_model, nar_d_model, kernel_size=5, padding="same"
),
nn.BatchNorm1d(nar_d_model),
nn.ReLU(),
nn.Dropout(0.5),
Transpose(),
nn.Linear(nar_d_model, nar_d_model),
)
self.nar_audio_prenet = nn.Sequential(
nn.Linear(nar_d_model, 256),
nn.ReLU(),
nn.Dropout(0.25),
nn.Linear(256, 256),
nn.ReLU(),
nn.Dropout(0.25),
nn.Linear(256, nar_d_model),
)
else:
self.ar_text_prenet = nn.Identity()
self.ar_audio_prenet = nn.Identity()
self.nar_text_prenet = nn.Identity()
self.nar_audio_prenet = nn.Identity()
self.ar_text_position = SinePositionalEmbedding(
d_model,
dropout=0.1,
scale=False,
alpha=True,
)
self.ar_audio_position = SinePositionalEmbedding(
d_model,
dropout=0.1,
scale=False,
alpha=True,
)
self.nar_text_position = SinePositionalEmbedding(
nar_d_model,
dropout=0.0,
scale=False,
alpha=False,
)
self.nar_audio_position = SinePositionalEmbedding(
nar_d_model,
dropout=0.1,
scale=False,
alpha=False,
)
self.ar_decoder = decoder_cls(
decoder_layer_cls(
d_model,
nhead,
dim_feedforward=d_model * 4,
dropout=0.1,
batch_first=True,
norm_first=norm_first,
),
num_layers=num_layers,
norm=LayerNorm(d_model) if norm_first else None,
)
self.ar_predict_layer = nn.Linear(
d_model, NUM_AUDIO_TOKENS + 1, bias=False
)
self.num_heads = nhead
self.nar_decoder = decoder_cls(
decoder_layer_cls(
nar_d_model,
int(nhead * nar_scale_factor),
dim_feedforward=nar_d_model * 4,
dropout=0.1,
batch_first=True,
norm_first=norm_first,
adaptive_layer_norm=True,
),
num_layers=int(num_layers * nar_scale_factor),
norm=AdaptiveLayerNorm(nar_d_model, norm=nn.LayerNorm(nar_d_model))
if norm_first
else None,
)
self.nar_predict_layers = nn.ModuleList(
[
nn.Linear(nar_d_model, NUM_AUDIO_TOKENS, bias=False)
for i in range(7)
]
)
self.nar_stage_embeddings = nn.ModuleList(
[TokenEmbedding(nar_d_model, 1) for i in range(7)]
)
self.prefix_mode = prefix_mode
if share_embedding:
# We share the parameters of the output projection layer with the parameters of the acoustic embedding Wa
# NOTE(Feiteng): In the experiment, this undermines accuracy
# self.ar_predict_layer.weight = self.ar_audio_embedding.weight
# We also share the parameters of the acoustic embedding layer and the output prediction layer,
# which means the weights of the j-th prediction layer are the same as the (j + 1)-th acoustic embedding layer.
for j in range(0, 6):
self.nar_predict_layers[j].weight = self.nar_audio_embeddings[
j + 2
].weight
self.rng = random.Random(0)
self.ar_accuracy_metric = MulticlassAccuracy(
NUM_AUDIO_TOKENS + 1,
top_k=10,
average="micro",
multidim_average="global",
ignore_index=NUM_AUDIO_TOKENS,
)
self.nar_accuracy_metric = MulticlassAccuracy(
NUM_AUDIO_TOKENS + 1,
top_k=10,
average="micro",
multidim_average="global",
ignore_index=NUM_AUDIO_TOKENS,
)
# self.apply(self._init_weights)
# def _init_weights(self, module):
# if isinstance(module, (nn.Linear)):
# module.weight.data.normal_(mean=0.0, std=0.02)
# if isinstance(module, nn.Linear) and module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, nn.LayerNorm):
# module.bias.data.zero_()
# module.weight.data.fill_(1.0)
# elif isinstance(module, nn.Embedding):
# module.weight.data.normal_(mean=0.0, std=1.0)
def stage_parameters(self, stage: int = 1) -> Iterator[nn.Parameter]:
assert stage > 0
if stage == 1:
for name, param in self.named_parameters():
if name.startswith("ar_"):
print(f" AR parameter: {name}")
yield param
if stage == 2:
for name, param in self.named_parameters():
if name.startswith("nar_"):
print(f"NAR parameter: {name}")
yield param
def stage_named_parameters(
self, stage: int = 1
) -> Iterator[Tuple[str, nn.Parameter]]:
assert stage > 0
if stage == 1:
for pair in self.named_parameters():
if pair[0].startswith("ar_"):
yield pair
if stage == 2:
for pair in self.named_parameters():
if pair[0].startswith("nar_"):
yield pair
def _prepare_prompts(self, y, y_lens, codes, nar_stage, y_prompts_codes):
# 5.1 For the NAR acoustic prompt tokens, we select a random segment waveform of 3 seconds
# from the same utterance.
# We implement this differently.
if self.prefix_mode == 0:
# no prefix
prefix_len = 0
y_emb = self.nar_audio_embeddings[0](y)
for j in range(1, nar_stage):
# Formula (4) (5)
y_emb = y_emb + self.nar_audio_embeddings[j](codes[..., j])
elif self.prefix_mode == 1:
            # prefix at beginning
int_low = (0.25 * y_lens.min()).type(torch.int64).item()
prefix_len = torch.randint(int_low, int_low * 2, size=()).item()
prefix_len = min(prefix_len, 225) # 24000/320 * 3s = 225 frames
y_prompts = self.nar_audio_embeddings[0](y[:, :prefix_len])
y_emb = self.nar_audio_embeddings[0](y[:, prefix_len:])
for j in range(1, 8):
y_prompts += self.nar_audio_embeddings[j](
codes[:, :prefix_len, j]
)
if j < nar_stage:
y_emb += self.nar_audio_embeddings[j](
codes[:, prefix_len:, j]
)
y_emb = torch.concat([y_prompts, y_emb], axis=1)
elif self.prefix_mode in [2, 4]:
if self.prefix_mode == 2:
# random prefix
prefix_len = min(225, int(0.25 * y_lens.min().item()))
y_prompts_codes = []
for b in range(codes.shape[0]):
start = self.rng.randint(0, y_lens[b].item() - prefix_len)
y_prompts_codes.append(codes[b, start : start + prefix_len])
y_prompts_codes = torch.stack(y_prompts_codes, dim=0)
y_prompts = self.nar_audio_embeddings[0](y_prompts_codes[..., 0])
y_emb = self.nar_audio_embeddings[0](y)
for j in range(1, 8):
y_prompts += self.nar_audio_embeddings[j](
y_prompts_codes[..., j]
)
if j < nar_stage:
y_emb += self.nar_audio_embeddings[j](codes[..., j])
y_emb = torch.concat([y_prompts, y_emb], axis=1)
prefix_len = 0
else:
raise ValueError
return y_emb, prefix_len
def forward(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: Union[torch.Tensor, PromptedFeatures],
y_lens: Union[torch.Tensor, PromptedFeatures],
reduction: str = "sum",
train_stage: int = 0,
) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:
"""
Args:
x:
A 2-D tensor of shape (N, S).
x_lens:
A 1-D tensor of shape (N,). It contains the number of tokens in `x`
before padding.
y:
A 3-D tensor of shape (N, T, 8).
y_lens:
A 1-D tensor of shape (N,). It contains the number of tokens in `x`
before padding.
train_stage:
0: AR & NAR modules, 1: AR modules, 2: NAR modules
Returns:
Return the predicted audio code matrix, cross-entropy loss and Top-10 accuracy.
"""
assert x.ndim == 2, x.shape
assert x_lens.ndim == 1, x_lens.shape
y_prompts_codes = None
if isinstance(y, PromptedFeatures):
y_prompts_codes, y = y.data
prompts_len, y_lens = y_lens.data
assert prompts_len.min() == prompts_len.max()
assert self.prefix_mode == 4
y_prompts_codes = y_prompts_codes.type(torch.int64)
assert y.ndim == 3, y.shape
assert y_lens.ndim == 1, y_lens.shape
# NOTE: x has been padded in TextTokenCollater
x_mask = make_pad_mask(x_lens).to(x.device)
text = x
x = self.ar_text_embedding(text)
x = self.ar_text_prenet(x)
x = self.ar_text_position(x)
total_loss, metrics = 0.0, {}
y_mask = make_pad_mask(y_lens).to(y.device)
y_mask_int = y_mask.type(torch.int64)
codes = y.type(torch.int64) * (1 - y_mask_int.unsqueeze(dim=-1))
# Training
# AR Decoder
def pad_y_eos(y, eos_id):
y = F.pad(y, (0, 1), value=0) + eos_id * F.pad(
y_mask_int, (0, 1), value=1
)
# inputs, targets
return y[:, :-1], y[:, 1:]
y, targets = pad_y_eos(codes[..., 0], eos_id=NUM_AUDIO_TOKENS)
if train_stage in [0, 1]:
y_emb = self.ar_audio_embedding(y)
y_emb = self.ar_audio_prenet(y_emb)
y_pos = self.ar_audio_position(y_emb)
y_len = y_lens.max()
tgt_mask = torch.triu(
torch.ones(y_len, y_len, device=y.device, dtype=torch.bool),
diagonal=1,
)
y_dec, _ = self.ar_decoder(
(y_pos, None),
x,
tgt_mask=tgt_mask,
tgt_key_padding_mask=y_mask,
memory_mask=None,
memory_key_padding_mask=x_mask,
)
logits = self.ar_predict_layer(y_dec).permute(0, 2, 1)
# loss
total_loss = F.cross_entropy(logits, targets, reduction=reduction)
metrics["ArTop10Accuracy"] = self.ar_accuracy_metric(
logits.detach(), targets
).item() * y_lens.sum().type(torch.float32)
# Non-AR Decoders
if train_stage in [0, 2]:
nar_stage = self.rng.choices(
(1, 2, 3, 4, 5, 6, 7), weights=[1.0 / 7] * 7, k=1
)[0]
x = self.nar_text_embedding(text)
x = self.nar_text_prenet(x)
x = self.nar_text_position(x)
y_len = y_lens.max()
targets = codes[..., nar_stage] + NUM_AUDIO_TOKENS * y_mask_int
y_emb, prefix_len = self._prepare_prompts(
y, y_lens, codes, nar_stage, y_prompts_codes
)
targets = targets[:, -(y_len - prefix_len) :]
if self.prefix_mode in [2, 4]:
y_mask = F.pad(y_mask, (y_emb.shape[1] - y_len, 0), value=False)
y_pos = self.nar_audio_prenet(y_emb)
y_pos = self.nar_audio_position(y_pos)
y_dec, _ = self.nar_decoder(
(y_pos, self.nar_stage_embeddings[nar_stage - 1].weight),
x,
tgt_mask=None,
tgt_key_padding_mask=y_mask,
memory_mask=None,
memory_key_padding_mask=x_mask,
)
logits = self.nar_predict_layers[nar_stage - 1](
y_dec[:, -(y_len - prefix_len) :]
).permute(0, 2, 1)
# loss
total_length = (y_lens).sum().type(torch.float32)
total_loss += F.cross_entropy(
logits,
targets,
ignore_index=NUM_AUDIO_TOKENS,
reduction=reduction,
)
metrics["NarTop10Accuracy"] = (
self.nar_accuracy_metric(
F.pad(
logits.detach(),
(0, 0, 0, 1, 0, 0),
value=logits.min().cpu().item(),
),
targets,
).item()
* (total_length / (total_length - prefix_len * x.shape[0]))
)
if train_stage == 0:
total_loss = total_loss / 2.0
return ((x, codes), total_loss, metrics)
def inference(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: torch.Tensor,
enroll_x_lens: Union[torch.Tensor, None] = None,
top_k: int = -100,
temperature: float = 1.0,
) -> torch.Tensor:
"""
Args:
x:
A 2-D tensor of shape (1, S).
x_lens:
A 1-D tensor of shape (1,). It contains the number of tokens in `x`
before padding.
y:
A 3-D tensor of shape (1, T, 8).
          top_k: (`optional`) int
            The number of highest probability tokens to keep for top-k filtering. Defaults to -100.
          temperature: (`optional`) float
            The value used to modulate the next token probabilities. Must be strictly positive. Defaults to 1.0.
Returns:
          Return the predicted audio code matrix.
"""
assert x.ndim == 2, x.shape
assert x_lens.ndim == 1, x_lens.shape
assert y.ndim == 3, y.shape
assert y.shape[0] == 1, y.shape
assert torch.all(x_lens > 0)
text = x
x = self.ar_text_embedding(text)
x = self.ar_text_prenet(x)
x = self.ar_text_position(x)
# NOTE: x has been padded in TextTokenCollater
x_mask = make_pad_mask(x_lens).to(x.device)
prompts = y
prefix_len = y.shape[1]
# AR Decoder
        # TODO: Manage decoder steps to avoid repetitive computation.
y = prompts[..., 0]
while True:
y_emb = self.ar_audio_embedding(y)
y_emb = self.ar_audio_prenet(y_emb)
y_pos = self.ar_audio_position(y_emb)
tgt_mask = torch.triu(
torch.ones(
y.shape[1], y.shape[1], device=y.device, dtype=torch.bool
),
diagonal=1,
)
y_dec, _ = self.ar_decoder(
(y_pos, None),
x,
tgt_mask=tgt_mask,
memory_mask=None,
memory_key_padding_mask=x_mask,
)
logits = self.ar_predict_layer(y_dec[:, -1])
samples = topk_sampling(
logits, top_k=top_k, top_p=1.0, temperature=temperature
)
if (
torch.argmax(logits, dim=-1)[0] == NUM_AUDIO_TOKENS
or samples[0, 0] == NUM_AUDIO_TOKENS
or (y.shape[1] - prefix_len) > x_lens.max() * 16
):
if prompts.shape[1] == y.shape[1]:
y = torch.concat([y, samples], dim=1)
print(f"VALL-F EOS [{prefix_len} -> {y.shape[1]}]")
break
y = torch.concat([y, samples], dim=1)
codes = [y[:, prefix_len:]]
# Non-AR Decoders
y_emb = self.nar_audio_embeddings[0](y)
if self.prefix_mode in [2, 4]: # Exclude enrolled_phonemes
enrolled_len = enroll_x_lens.max().item()
# SOS + Synthesis Text + EOS
text = torch.concat(
[
text[:, :1],
text[:, enrolled_len - 1 :],
],
dim=1,
)
assert text.shape[0] == 1
x = self.nar_text_embedding(text)
x = self.nar_text_prenet(x)
x = self.nar_text_position(x)
if self.prefix_mode != 0:
for j in range(1, 8):
y_emb[:, :prefix_len] += self.nar_audio_embeddings[j](
prompts[..., j]
)
for i, (predict_layer, embedding_layer) in enumerate(
zip(
self.nar_predict_layers,
self.nar_audio_embeddings[1:],
)
):
y_pos = self.nar_audio_prenet(y_emb)
y_pos = self.nar_audio_position(y_pos)
y_dec, _ = self.nar_decoder(
(y_pos, self.nar_stage_embeddings[i].weight),
x,
tgt_mask=None,
memory_mask=None,
memory_key_padding_mask=None,
)
logits = predict_layer(y_dec[:, prefix_len:])
samples = torch.argmax(logits, dim=-1)
codes.append(samples)
# Formula (4) (5)
if i < 6:
if self.prefix_mode == 0:
y_emb[:, :prefix_len] += embedding_layer(
prompts[..., i + 1]
)
y_emb[:, prefix_len:] += embedding_layer(samples)
assert len(codes) == 8
return torch.stack(codes, dim=-1)
class VALLE(VALLF):
"""It implements https://arxiv.org/abs/2301.02111
"Neural Codec Language Models are Zero-Shot Text to Speech Synthesizers"
"""
def __init__(
self,
d_model: int,
nhead: int,
num_layers: int,
norm_first: bool = True,
add_prenet: bool = False,
prefix_mode: int = 0,
share_embedding: bool = True,
nar_scale_factor: float = 1.0,
):
"""
Args:
d_model:
The number of expected features in the input (required).
nhead:
The number of heads in the multiheadattention models (required).
num_layers:
The number of sub-decoder-layers in the decoder (required).
"""
super(VALLE, self).__init__(
d_model,
nhead,
num_layers,
norm_first=norm_first,
add_prenet=add_prenet,
decoder_cls=TransformerEncoder,
decoder_layer_cls=TransformerEncoderLayer,
prefix_mode=prefix_mode,
share_embedding=share_embedding,
nar_scale_factor=nar_scale_factor,
)
def forward(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: Union[torch.Tensor, PromptedFeatures],
y_lens: Union[torch.Tensor, PromptedFeatures],
reduction: str = "sum",
train_stage: int = 0,
) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:
"""
Args:
x:
A 2-D tensor of shape (N, S).
x_lens:
A 1-D tensor of shape (N,). It contains the number of tokens in `x`
before padding.
y:
A 3-D tensor of shape (N, T, 8).
y_lens:
            A 1-D tensor of shape (N,). It contains the number of tokens in `y`
            before padding.
train_stage:
0: AR & NAR modules, 1: AR modules, 2: NAR modules
Returns:
Return the predicted audio code matrix, cross-entropy loss and Top-10 accuracy.
"""
assert x.ndim == 2, x.shape
assert x_lens.ndim == 1, x_lens.shape
y_prompts_codes = None
if isinstance(y, PromptedFeatures):
y_prompts_codes, y = y.data
prompts_len, y_lens = y_lens.data
assert prompts_len.min() == prompts_len.max()
assert self.prefix_mode == 4
y_prompts_codes = y_prompts_codes.type(torch.int64)
assert y.ndim == 3, y.shape
assert y_lens.ndim == 1, y_lens.shape
# NOTE: x has been padded in TextTokenCollater
x_mask = make_pad_mask(x_lens).to(x.device)
y_mask = make_pad_mask(y_lens).to(y.device)
xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
y_mask_int = y_mask.type(torch.int64)
text = x
codes = y.type(torch.int64) * (1 - y_mask_int.unsqueeze(dim=-1))
def pad_y_eos(y, eos_id):
y = F.pad(y, (0, 1)) + eos_id * F.pad(y_mask_int, (0, 1), value=1)
# inputs, targets
return y[:, :-1], y[:, 1:]
y, targets = pad_y_eos(codes[..., 0], eos_id=NUM_AUDIO_TOKENS)
x_len = x_lens.max()
metrics = {}
total_loss = 0.0
# AR Decoder
if train_stage in [0, 1]:
x = self.ar_text_embedding(text)
x = self.ar_text_prenet(x)
x = self.ar_text_position(x)
y_len = y_lens.max()
x_attn_mask = F.pad(
torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
(0, y_len),
value=True,
)
y_attn_mask = F.pad(
torch.triu(
torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
diagonal=1,
),
(x_len, 0),
value=False,
)
xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
# merge key padding and attention masks
bsz, src_len = x.shape[0], x_len + y_len
_xy_padding_mask = (
xy_padding_mask.view(bsz, 1, 1, src_len)
.expand(-1, self.num_heads, -1, -1)
.reshape(bsz * self.num_heads, 1, src_len)
)
xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
xy_attn_mask = new_attn_mask
y_emb = self.ar_audio_embedding(y)
y_emb = self.ar_audio_prenet(y_emb)
y_pos = self.ar_audio_position(y_emb)
xy_pos = torch.concat([x, y_pos], dim=1)
xy_dec, _ = self.ar_decoder(
(xy_pos, None),
mask=xy_attn_mask,
# src_key_padding_mask=xy_padding_mask,
# is_causal=True,
)
logits = self.ar_predict_layer(xy_dec[:, x_len:]).permute(0, 2, 1)
# loss
total_loss = F.cross_entropy(logits, targets, reduction=reduction)
metrics["ArTop10Accuracy"] = self.ar_accuracy_metric(
logits.detach(), targets
).item() * y_lens.sum().type(torch.float32)
# Non-AR Decoders
if train_stage in [0, 2]:
nar_stage = self.rng.choices(
(1, 2, 3, 4, 5, 6, 7), weights=[1.0 / 7] * 7, k=1
)[0]
x = self.nar_text_embedding(text)
x = self.nar_text_prenet(x)
x = self.nar_text_position(x)
targets = codes[..., nar_stage] + NUM_AUDIO_TOKENS * y_mask_int
y_emb, prefix_len = self._prepare_prompts(
y, y_lens, codes, nar_stage, y_prompts_codes
)
y_len = y_lens.max()
targets = targets[:, -(y_len - prefix_len) :]
# VALL-E
if self.prefix_mode in [2, 4]:
xy_padding_mask = torch.concat(
[
x_mask,
F.pad(y_mask, (y_emb.shape[1] - y_len, 0), value=False),
],
dim=1,
)
y_pos = self.nar_audio_prenet(y_emb)
y_pos = self.nar_audio_position(y_pos)
xy_pos = torch.concat([x, y_pos], dim=1)
xy_dec, _ = self.nar_decoder(
(xy_pos, self.nar_stage_embeddings[nar_stage - 1].weight),
src_key_padding_mask=xy_padding_mask,
# is_causal=False,
)
logits = self.nar_predict_layers[nar_stage - 1](
xy_dec[:, -(y_len - prefix_len) :]
).permute(0, 2, 1)
# loss
total_length = (y_lens).sum().type(torch.float32)
total_loss += (
F.cross_entropy(
logits,
targets,
ignore_index=NUM_AUDIO_TOKENS,
reduction=reduction,
)
* (total_length / (total_length - prefix_len * x.shape[0]))
)
metrics["NarTop10Accuracy"] = (
self.nar_accuracy_metric(
F.pad(
logits.detach(),
(0, 0, 0, 1, 0, 0),
value=logits.min().cpu().item(),
),
targets,
).item()
* total_length
)
if train_stage == 0:
total_loss = total_loss / 2.0
return ((x, codes), total_loss, metrics)
def inference(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: torch.Tensor,
enroll_x_lens: torch.Tensor,
top_k: int = -100,
temperature: float = 1.0,
) -> torch.Tensor:
"""
Args:
x:
A 2-D tensor of shape (1, S).
x_lens:
A 1-D tensor of shape (1,). It contains the number of tokens in `x`
before padding.
y:
A 3-D tensor of shape (1, T, 8).
          top_k: (`optional`) int
            The number of highest probability tokens to keep for top-k filtering. Defaults to -100.
          temperature: (`optional`) float
            The value used to modulate the next token probabilities. Must be strictly positive. Defaults to 1.0.
Returns:
Return the predicted audio code matrix.
"""
assert x.ndim == 2, x.shape
assert x_lens.ndim == 1, x_lens.shape
assert y.ndim == 3, y.shape
assert y.shape[0] == 1, y.shape
assert torch.all(x_lens > 0)
# NOTE: x has been padded in TextTokenCollater
text = x
x = self.ar_text_embedding(text)
x = self.ar_text_prenet(x)
x = self.ar_text_position(x)
text_len = x_lens.max()
prompts = y
prefix_len = y.shape[1]
# AR Decoder
        # TODO: Manage decoder steps to avoid repetitive computation.
y = prompts[..., 0]
x_len = x_lens.max()
x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
while True:
y_emb = self.ar_audio_embedding(y)
y_emb = self.ar_audio_prenet(y_emb)
y_pos = self.ar_audio_position(y_emb)
xy_pos = torch.concat([x, y_pos], dim=1)
y_len = y.shape[1]
x_attn_mask_pad = F.pad(
x_attn_mask,
(0, y_len),
value=True,
)
y_attn_mask = F.pad(
torch.triu(
torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1
),
(x_len, 0),
value=False,
)
xy_attn_mask = torch.concat(
[x_attn_mask_pad, y_attn_mask], dim=0
).to(y.device)
xy_dec, _ = self.ar_decoder(
(xy_pos, None),
mask=xy_attn_mask,
)
logits = self.ar_predict_layer(xy_dec[:, -1])
samples = topk_sampling(
logits, top_k=top_k, top_p=1.0, temperature=temperature
)
if (
torch.argmax(logits, dim=-1)[0] == NUM_AUDIO_TOKENS
or samples[0, 0] == NUM_AUDIO_TOKENS
or (y.shape[1] - prompts.shape[1]) > x_lens.max() * 16
):
if prompts.shape[1] == y.shape[1]:
y = torch.concat([y, samples], dim=1)
print(f"VALL-E EOS [{prompts.shape[1]} -> {y.shape[1]}]")
break
y = torch.concat([y, samples], dim=1)
codes = [y[:, prefix_len:]]
# Non-AR Decoders
y_emb = self.nar_audio_embeddings[0](y)
if self.prefix_mode in [2, 4]: # Exclude enrolled_phonemes
enrolled_len = enroll_x_lens.max().item()
# SOS + Synthesis Text + EOS
text = torch.concat(
[
text[:, :1],
text[:, enrolled_len - 1 :],
],
dim=1,
)
text_len = text_len - (enrolled_len - 2)
assert text.shape[0] == 1
x = self.nar_text_embedding(text)
x = self.nar_text_prenet(x)
x = self.nar_text_position(x)
if self.prefix_mode == 0:
for i, (predict_layer, embedding_layer) in enumerate(
zip(
self.nar_predict_layers,
self.nar_audio_embeddings[1:],
)
):
y_pos = self.nar_audio_prenet(y_emb)
y_pos = self.nar_audio_position(y_pos)
xy_pos = torch.concat([x, y_pos], dim=1)
xy_dec, _ = self.nar_decoder(
(xy_pos, self.nar_stage_embeddings[i].weight)
)
logits = predict_layer(xy_dec[:, text_len + prefix_len :])
samples = torch.argmax(logits, dim=-1)
codes.append(samples)
if i < 6:
y_emb[:, :prefix_len] += embedding_layer(
prompts[..., i + 1]
)
y_emb[:, prefix_len:] += embedding_layer(samples)
else:
for j in range(1, 8):
y_emb[:, :prefix_len] += self.nar_audio_embeddings[j](
prompts[..., j]
)
for i, (predict_layer, embedding_layer) in enumerate(
zip(
self.nar_predict_layers,
self.nar_audio_embeddings[1:],
)
):
y_pos = self.nar_audio_prenet(y_emb)
y_pos = self.nar_audio_position(y_pos)
xy_pos = torch.concat([x, y_pos], dim=1)
xy_dec, _ = self.nar_decoder(
(xy_pos, self.nar_stage_embeddings[i].weight)
)
logits = predict_layer(xy_dec[:, text_len + prefix_len :])
samples = torch.argmax(logits, dim=-1)
codes.append(samples)
if i < 6:
y_emb[:, prefix_len:] += embedding_layer(samples)
assert len(codes) == 8
return torch.stack(codes, dim=-1)
def continual(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: torch.Tensor,
) -> torch.Tensor:
"""
Args:
x:
A 2-D tensor of shape (1, S).
x_lens:
A 1-D tensor of shape (1,). It contains the number of tokens in `x`
before padding.
y:
A 3-D tensor of shape (1, T, 8).
Returns:
Return the predicted audio code matrix.
"""
assert x.ndim == 2, x.shape
assert x_lens.ndim == 1, x_lens.shape
assert y.ndim == 3, y.shape
assert y.shape[0] == 1, y.shape
assert torch.all(x_lens > 0)
# NOTE: x has been padded in TextTokenCollater
text = x
x = self.ar_text_embedding(text)
x = self.ar_text_prenet(x)
x = self.ar_text_position(x)
text_len = x_lens.max()
prefix_len = min(int(y.shape[1] * 0.5), 3 * 75)
# AR Decoder
prompts = y[:, :prefix_len]
codes = [y[:, prefix_len:, 0]]
# Non-AR Decoders
x = self.nar_text_embedding(text)
x = self.nar_text_prenet(x)
x = self.nar_text_position(x)
y_emb = self.nar_audio_embeddings[0](y[..., 0])
if self.prefix_mode == 0:
for i, (predict_layer, embedding_layer) in enumerate(
zip(
self.nar_predict_layers,
self.nar_audio_embeddings[1:],
)
):
y_pos = self.nar_audio_position(y_emb)
y_pos = self.nar_audio_prenet(y_pos)
xy_pos = torch.concat([x, y_pos], dim=1)
xy_dec, _ = self.nar_decoder(
(xy_pos, self.nar_stage_embeddings[i].weight)
)
logits = predict_layer(xy_dec[:, text_len + prefix_len :])
samples = torch.argmax(logits, dim=-1)
codes.append(samples)
if i < 6:
y_emb[:, :prefix_len] += embedding_layer(
prompts[..., i + 1]
)
y_emb[:, prefix_len:] += embedding_layer(samples)
else:
for j in range(1, 8):
y_emb[:, :prefix_len] += self.nar_audio_embeddings[j](
prompts[..., j]
)
for i, (predict_layer, embedding_layer) in enumerate(
zip(
self.nar_predict_layers,
self.nar_audio_embeddings[1:],
)
):
y_pos = self.nar_audio_prenet(y_emb)
y_pos = self.nar_audio_position(y_pos)
xy_pos = torch.concat([x, y_pos], dim=1)
xy_dec, _ = self.nar_decoder(
(xy_pos, self.nar_stage_embeddings[i].weight)
)
logits = predict_layer(xy_dec[:, text_len + prefix_len :])
samples = torch.argmax(logits, dim=-1)
codes.append(samples)
if i < 6:
y_emb[:, prefix_len:] += embedding_layer(samples)
assert len(codes) == 8
return torch.stack(codes, dim=-1)
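# A usage sketch (illustrative, not part of the original file): given text
# tokens from a TextTokenCollater and an 8-codebook EnCodec prompt, zero-shot
# synthesis with a trained VALLE would look roughly like
#
#     model = VALLE(d_model=1024, nhead=16, num_layers=12)
#     model.load_state_dict(checkpoint["model"])  # `checkpoint` is hypothetical
#     model.eval()
#     with torch.no_grad():
#         codes = model.inference(
#             x=text_tokens,            # (1, S) int64 phoneme ids
#             x_lens=text_tokens_lens,  # (1,)
#             y=prompt_codes,           # (1, T, 8) int64 EnCodec codes
#             enroll_x_lens=enroll_x_lens,  # enrolled-text length (used for prefix_mode 2/4)
#             top_k=-100,
#             temperature=1.0,
#         )                             # -> (1, T', 8) predicted codes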
# https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py
def top_k_top_p_filtering(
logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1
):
"""Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
top_k = min(
max(top_k, min_tokens_to_keep), logits.size(-1)
) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
        # Remove tokens with cumulative probability above the threshold (tokens with 0 probability are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[
..., :-1
].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(
1, sorted_indices, sorted_indices_to_remove
)
logits[indices_to_remove] = filter_value
return logits
def topk_sampling(logits, top_k=10, top_p=1.0, temperature=1.0):
    # temperature: (`optional`) float
    #     The value used to modulate the next token probabilities. Must be strictly positive. Defaults to 1.0.
    # top_k: (`optional`) int
    #     The number of highest probability vocabulary tokens to keep for top-k filtering. Between 1 and infinity. Defaults to 10.
    # top_p: (`optional`) float
    #     The cumulative probability threshold for nucleus (top-p) sampling: only the most probable tokens whose probabilities add up to top_p are kept. Must be between 0 and 1. Defaults to 1.0.
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
logits = logits / temperature
# Top-p/top-k filtering
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
# Sample
token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
return token
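if __name__ == "__main__":
    # A minimal sampling sketch on random logits (not part of the original
    # training or inference recipes): top-k keeps the 5 largest logits per row,
    # nucleus filtering keeps the smallest set of tokens whose cumulative
    # probability reaches 0.9, and one token is drawn per row. The logits are
    # cloned because top_k_top_p_filtering modifies its input in place.
    torch.manual_seed(0)
    demo_logits = torch.randn(2, NUM_AUDIO_TOKENS + 1)
    demo_token = topk_sampling(
        demo_logits.clone(), top_k=5, top_p=0.9, temperature=1.0
    )
    print(demo_token.shape)  # torch.Size([2, 1])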
| EXA-1-master | exa/models/valle/vall-e-main/valle/models/valle.py |
import argparse
import torch.nn as nn
from icefall.utils import AttributeDict, str2bool
from .transformer import Transformer
from .valle import NUM_MEL_BINS, VALLE, VALLF
from .visualizer import visualize
def add_model_arguments(parser: argparse.ArgumentParser):
parser.add_argument(
"--model-name",
type=str,
default="VALL-E",
help="VALL-E, VALL-F or Transformer.",
)
parser.add_argument(
"--decoder-dim",
type=int,
default=1024,
help="Embedding dimension in the decoder model.",
)
parser.add_argument(
"--nhead",
type=int,
default=16,
help="Number of attention heads in the Decoder layers.",
)
parser.add_argument(
"--num-decoder-layers",
type=int,
default=12,
help="Number of Decoder layers.",
)
parser.add_argument(
"--scale-factor",
type=float,
default=1.0,
help="Model scale factor which will be assigned different meanings in different models.",
)
parser.add_argument(
"--norm-first",
type=str2bool,
default=True,
help="Pre or Post Normalization.",
)
parser.add_argument(
"--add-prenet",
type=str2bool,
default=False,
help="Whether add PreNet after Inputs.",
)
parser.add_argument(
"--prefix-mode",
type=int,
default=0,
help="The mode for how to prefix VALL-E NAR Decoder, "
"0: no prefix, 1: 0 to random, 2: random to random, 4: chunk of pre or post utterance.",
)
parser.add_argument(
"--share-embedding",
type=str2bool,
default=True,
help="Share the parameters of the output projection layer with the parameters of the acoustic embedding.",
)
def get_model(params: AttributeDict) -> nn.Module:
if params.model_name.lower() in ["vall-f", "vallf"]:
model = VALLF(
params.decoder_dim,
params.nhead,
params.num_decoder_layers,
norm_first=params.norm_first,
add_prenet=params.add_prenet,
prefix_mode=params.prefix_mode,
share_embedding=params.share_embedding,
nar_scale_factor=params.scale_factor,
)
elif params.model_name.lower() in ["vall-e", "valle"]:
model = VALLE(
params.decoder_dim,
params.nhead,
params.num_decoder_layers,
norm_first=params.norm_first,
add_prenet=params.add_prenet,
prefix_mode=params.prefix_mode,
share_embedding=params.share_embedding,
nar_scale_factor=params.scale_factor,
)
else:
assert params.model_name in ["Transformer"]
model = Transformer(
params.decoder_dim,
params.nhead,
params.num_decoder_layers,
norm_first=params.norm_first,
add_prenet=params.add_prenet,
)
return model
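if __name__ == "__main__":
    # A minimal usage sketch (not part of the original file): the field names
    # mirror the CLI options registered in add_model_arguments; the small
    # dimensions are illustrative only.
    _params = AttributeDict(
        {
            "model_name": "valle",
            "decoder_dim": 64,
            "nhead": 4,
            "num_decoder_layers": 2,
            "norm_first": True,
            "add_prenet": False,
            "prefix_mode": 0,
            "share_embedding": True,
            "scale_factor": 1.0,
        }
    )
    _model = get_model(_params)
    print(sum(p.numel() for p in _model.parameters()), "parameters")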
| EXA-1-master | exa/models/valle/vall-e-main/valle/models/__init__.py |
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from icefall.utils import make_pad_mask
from torchmetrics.classification import BinaryAccuracy
from valle.models.valle import NUM_TEXT_TOKENS, Transpose
from valle.modules.embedding import SinePositionalEmbedding, TokenEmbedding
NUM_MEL_BINS = 100 # BigVGAN bigvgan_24khz_100band
class Transformer(nn.Module):
"""It implements seq2seq Transformer TTS for debug(No StopPredictor and SpeakerEmbeding)
Neural Speech Synthesis with Transformer Network
https://arxiv.org/abs/1809.08895
"""
def __init__(
self,
d_model: int,
nhead: int,
num_layers: int,
norm_first: bool = True,
add_prenet: bool = False,
):
"""
Args:
d_model:
The number of expected features in the input (required).
nhead:
The number of heads in the multiheadattention models (required).
num_layers:
The number of sub-decoder-layers in the decoder (required).
"""
super().__init__()
self.text_embedding = TokenEmbedding(d_model, NUM_TEXT_TOKENS) # W_x
if add_prenet:
self.encoder_prenet = nn.Sequential(
Transpose(),
nn.Conv1d(d_model, d_model, kernel_size=5, padding="same"),
nn.BatchNorm1d(d_model),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv1d(d_model, d_model, kernel_size=5, padding="same"),
nn.BatchNorm1d(d_model),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv1d(d_model, d_model, kernel_size=5, padding="same"),
nn.BatchNorm1d(d_model),
nn.ReLU(),
nn.Dropout(0.5),
Transpose(),
nn.Linear(d_model, d_model),
)
self.decoder_prenet = nn.Sequential(
nn.Linear(NUM_MEL_BINS, 256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(256, 256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(256, d_model),
)
else:
self.encoder_prenet = nn.Identity()
self.decoder_prenet = nn.Linear(NUM_MEL_BINS, d_model)
self.encoder_position = SinePositionalEmbedding(
d_model,
dropout=0.1,
scale=False,
)
self.decoder_position = SinePositionalEmbedding(
d_model, dropout=0.1, scale=False
)
self.encoder = nn.TransformerEncoder(
nn.TransformerEncoderLayer(
d_model,
nhead,
dim_feedforward=d_model * 4,
activation=F.relu,
dropout=0.1,
batch_first=True,
norm_first=norm_first,
),
num_layers=num_layers,
norm=nn.LayerNorm(d_model) if norm_first else None,
)
self.decoder = nn.TransformerDecoder(
nn.TransformerDecoderLayer(
d_model,
nhead,
dim_feedforward=d_model * 4,
activation=F.relu,
dropout=0.1,
batch_first=True,
norm_first=norm_first,
),
num_layers=num_layers,
norm=nn.LayerNorm(d_model) if norm_first else None,
)
self.predict_layer = nn.Linear(d_model, NUM_MEL_BINS)
self.stop_layer = nn.Linear(d_model, 1)
self.stop_accuracy_metric = BinaryAccuracy(
threshold=0.5, multidim_average="global"
)
# self.apply(self._init_weights)
# def _init_weights(self, module):
# if isinstance(module, (nn.Linear)):
# module.weight.data.normal_(mean=0.0, std=0.02)
# if isinstance(module, nn.Linear) and module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, nn.LayerNorm):
# module.bias.data.zero_()
# module.weight.data.fill_(1.0)
# elif isinstance(module, nn.Embedding):
# module.weight.data.normal_(mean=0.0, std=0.02)
def forward(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: torch.Tensor,
y_lens: torch.Tensor,
reduction: str = "sum",
train_stage: int = 0,
) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:
"""
Args:
x:
A 2-D tensor of shape (N, S).
x_lens:
A 1-D tensor of shape (N,). It contains the number of tokens in `x`
before padding.
y:
            A 3-D tensor of shape (N, T, NUM_MEL_BINS).
y_lens:
            A 1-D tensor of shape (N,). It contains the number of frames in `y`
            before padding.
train_stage:
Not used in this model.
Returns:
          Return the predicted fbank features, the combined regression and stop-token loss, and stop-token metrics.
"""
del train_stage
assert x.ndim == 2, x.shape
assert x_lens.ndim == 1, x_lens.shape
assert y.ndim == 3, y.shape
assert y_lens.ndim == 1, y_lens.shape
assert torch.all(x_lens > 0)
# NOTE: x has been padded in TextTokenCollater
x_mask = make_pad_mask(x_lens).to(x.device)
x = self.text_embedding(x)
x = self.encoder_prenet(x)
x = self.encoder_position(x)
x = self.encoder(x, src_key_padding_mask=x_mask)
total_loss, metrics = 0.0, {}
y_mask = make_pad_mask(y_lens).to(y.device)
y_mask_float = y_mask.type(torch.float32)
data_mask = 1.0 - y_mask_float.unsqueeze(-1)
# Training
# AR Decoder
def pad_y(y):
y = F.pad(y, (0, 0, 1, 0, 0, 0), value=0).detach()
# inputs, targets
return y[:, :-1], y[:, 1:]
y, targets = pad_y(y * data_mask) # mask padding as zeros
y_emb = self.decoder_prenet(y)
y_pos = self.decoder_position(y_emb)
y_len = y_lens.max()
tgt_mask = torch.triu(
torch.ones(y_len, y_len, device=y.device, dtype=torch.bool),
diagonal=1,
)
y_dec = self.decoder(
y_pos,
x,
tgt_mask=tgt_mask,
memory_key_padding_mask=x_mask,
)
predict = self.predict_layer(y_dec)
# loss
total_loss = F.mse_loss(predict, targets, reduction=reduction)
logits = self.stop_layer(y_dec).squeeze(-1)
stop_loss = F.binary_cross_entropy_with_logits(
logits,
y_mask_float.detach(),
weight=1.0 + y_mask_float.detach() * 4.0,
reduction=reduction,
)
metrics["stop_loss"] = stop_loss.detach()
stop_accuracy = self.stop_accuracy_metric(
(torch.sigmoid(logits) >= 0.5).type(torch.int64),
y_mask.type(torch.int64),
)
# icefall MetricsTracker.norm_items()
metrics["stop_accuracy"] = stop_accuracy.item() * y_lens.sum().type(
torch.float32
)
return ((x, predict), total_loss + 100.0 * stop_loss, metrics)
def inference(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: Any = None,
**kwargs,
) -> torch.Tensor:
"""
Args:
x:
A 2-D tensor of shape (1, S).
x_lens:
A 1-D tensor of shape (1,). It contains the number of tokens in `x`
before padding.
Returns:
          Return the predicted fbank feature matrix.
"""
assert x.ndim == 2, x.shape
assert x_lens.ndim == 1, x_lens.shape
assert torch.all(x_lens > 0)
x_mask = make_pad_mask(x_lens).to(x.device)
x = self.text_embedding(x)
x = self.encoder_prenet(x)
x = self.encoder_position(x)
x = self.encoder(x, src_key_padding_mask=x_mask)
x_mask = make_pad_mask(x_lens).to(x.device)
# AR Decoder
        # TODO: Manage decoder steps to avoid repetitive computation.
y = torch.zeros(
[x.shape[0], 1, NUM_MEL_BINS], dtype=torch.float32, device=x.device
)
while True:
y_emb = self.decoder_prenet(y)
y_pos = self.decoder_position(y_emb)
tgt_mask = torch.triu(
torch.ones(
y.shape[1], y.shape[1], device=y.device, dtype=torch.bool
),
diagonal=1,
)
y_dec = self.decoder(
y_pos,
x,
tgt_mask=tgt_mask,
memory_mask=None,
memory_key_padding_mask=x_mask,
)
predict = self.predict_layer(y_dec[:, -1:])
logits = self.stop_layer(y_dec[:, -1:]) > 0 # sigmoid(0.0) = 0.5
if y.shape[1] > x_lens.max() * 10 or all(logits.cpu().numpy()):
print(
f"TransformerTTS EOS [Text {x_lens[0]} -> Audio {y.shape[1]}]"
)
break
y = torch.concat([y, predict], dim=1)
return y[:, 1:]
def visualize(
self,
fbank: torch.Tensor,
output_path: str,
    ) -> None:
"""
Args:
fbank:
A 3-D tensor of shape (N, T, NUM_MEL_BINS).
Returns:
None.
"""
import matplotlib.pyplot as plt
fbank = fbank.transpose(1, 2).cpu().numpy()
for b in range(fbank.shape[0]):
_ = plt.figure(figsize=(12, 6))
plt.imshow(
X=fbank[b],
cmap=plt.get_cmap("jet"),
aspect="auto",
interpolation="nearest",
)
plt.gca().invert_yaxis()
plt.savefig(f"{output_path}/{b}_mels.png")
plt.close()
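if __name__ == "__main__":
    # A minimal smoke-test sketch (not part of the original recipe): it runs a
    # single training-style forward pass of a tiny, randomly initialised model
    # on dummy text tokens and fbank frames to illustrate the expected shapes.
    # It assumes the phoneme vocabulary (NUM_TEXT_TOKENS) has at least 10 ids.
    _model = Transformer(d_model=64, nhead=4, num_layers=2)
    _x = torch.randint(0, 10, (2, 12))     # (N, S) text token ids
    _x_lens = torch.tensor([12, 9])
    _y = torch.randn(2, 40, NUM_MEL_BINS)  # (N, T, NUM_MEL_BINS) fbank frames
    _y_lens = torch.tensor([40, 30])
    (_, _predict), _loss, _metrics = _model(_x, _x_lens, _y, _y_lens)
    print(_predict.shape, float(_loss), _metrics["stop_loss"])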
| EXA-1-master | exa/models/valle/vall-e-main/valle/models/transformer.py |
#!/usr/bin/env python3
# Copyright 2023 (authors: Feiteng Li)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import torch
def visualize(
predicts: Tuple[torch.Tensor],
batch: Dict[str, Union[List, torch.Tensor]],
output_dir: str,
limit: int = 4,
) -> None:
text_tokens = batch["text_tokens"].to("cpu").detach().numpy()
text_tokens_lens = batch["text_tokens_lens"].to("cpu").detach().numpy()
audio_features = batch["audio_features"].to("cpu").detach().numpy()
audio_features_lens = (
batch["audio_features_lens"].to("cpu").detach().numpy()
)
assert text_tokens.ndim == 2
utt_ids, texts = batch["utt_id"], batch["text"]
encoder_outputs = predicts[0].to("cpu").detach().numpy()
decoder_outputs = predicts[1].to("cpu").detach().numpy()
vmin, vmax = 0, 1024 # Encodec
if decoder_outputs.dtype == np.float32:
vmin, vmax = -6, 0 # Fbank
num_figures = 3
for b, (utt_id, text) in enumerate(zip(utt_ids[:limit], texts[:limit])):
_ = plt.figure(figsize=(14, 8 * num_figures))
S = text_tokens_lens[b]
T = audio_features_lens[b]
# encoder
plt.subplot(num_figures, 1, 1)
plt.title(f"Text: {text}")
plt.imshow(
X=np.transpose(encoder_outputs[b]),
cmap=plt.get_cmap("jet"),
aspect="auto",
interpolation="nearest",
)
plt.gca().invert_yaxis()
plt.axvline(x=S - 0.4, linewidth=2, color="r")
plt.xlabel("Encoder Output")
plt.colorbar()
# decoder
plt.subplot(num_figures, 1, 2)
plt.imshow(
X=np.transpose(decoder_outputs[b]),
cmap=plt.get_cmap("jet"),
aspect="auto",
interpolation="nearest",
vmin=vmin,
vmax=vmax,
)
plt.gca().invert_yaxis()
plt.axvline(x=T - 0.4, linewidth=2, color="r")
plt.xlabel("Decoder Output")
plt.colorbar()
# target
plt.subplot(num_figures, 1, 3)
plt.imshow(
X=np.transpose(audio_features[b]),
cmap=plt.get_cmap("jet"),
aspect="auto",
interpolation="nearest",
vmin=vmin,
vmax=vmax,
)
plt.gca().invert_yaxis()
plt.axvline(x=T - 0.4, linewidth=2, color="r")
plt.xlabel("Decoder Target")
plt.colorbar()
plt.savefig(f"{output_dir}/{utt_id}_features.png")
plt.close()
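if __name__ == "__main__":
    # A minimal sketch with random data (not part of the original file): the
    # dict keys follow the fields read above, the shapes are illustrative, and
    # the figures are written into a temporary directory.
    import tempfile
    _N, _S, _T = 2, 6, 20
    _batch = {
        "utt_id": [f"utt{i}" for i in range(_N)],
        "text": ["hello", "world"],
        "text_tokens": torch.randint(0, 10, (_N, _S)),
        "text_tokens_lens": torch.tensor([6, 4]),
        "audio_features": torch.randint(0, 1024, (_N, _T, 8)),
        "audio_features_lens": torch.tensor([20, 15]),
    }
    _predicts = (torch.randn(_N, _S, 16), torch.randint(0, 1024, (_N, _T, 8)))
    with tempfile.TemporaryDirectory() as _out_dir:
        visualize(_predicts, _batch, _out_dir, limit=_N)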
| EXA-1-master | exa/models/valle/vall-e-main/valle/models/visualizer.py |
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
class TokenEmbedding(nn.Module):
def __init__(
self,
dim_model: int,
vocab_size: int,
dropout: float = 0.0,
):
super().__init__()
self.vocab_size = vocab_size
self.dim_model = dim_model
self.dropout = torch.nn.Dropout(p=dropout)
self.word_embeddings = nn.Embedding(self.vocab_size, self.dim_model)
@property
def weight(self) -> torch.Tensor:
return self.word_embeddings.weight
def embedding(self, index: int) -> torch.Tensor:
return self.word_embeddings.weight[index : index + 1]
def forward(self, x: torch.Tensor):
X = self.word_embeddings(x)
X = self.dropout(X)
return X
class SinePositionalEmbedding(nn.Module):
def __init__(
self,
dim_model: int,
dropout: float = 0.0,
scale: bool = False,
alpha: bool = False,
):
super().__init__()
self.dim_model = dim_model
self.x_scale = math.sqrt(dim_model) if scale else 1.0
self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
self.dropout = torch.nn.Dropout(p=dropout)
self.reverse = False
self.pe = None
self.extend_pe(torch.tensor(0.0).expand(1, 4000))
def extend_pe(self, x):
"""Reset the positional encodings."""
if self.pe is not None:
if self.pe.size(1) >= x.size(1):
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
pe = torch.zeros(x.size(1), self.dim_model)
if self.reverse:
position = torch.arange(
x.size(1) - 1, -1, -1.0, dtype=torch.float32
).unsqueeze(1)
else:
position = torch.arange(
0, x.size(1), dtype=torch.float32
).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, self.dim_model, 2, dtype=torch.float32)
* -(math.log(10000.0) / self.dim_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.pe = pe.to(device=x.device, dtype=x.dtype).detach()
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.extend_pe(x)
output = x.unsqueeze(-1) if x.ndim == 2 else x
output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)]
return self.dropout(output)
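if __name__ == "__main__":
    # A minimal usage sketch (not part of the original file): embed a batch of
    # token ids and add (optionally scaled) sinusoidal positional information.
    _emb = TokenEmbedding(dim_model=32, vocab_size=100)
    _pos = SinePositionalEmbedding(dim_model=32, dropout=0.0, scale=True)
    _tokens = torch.randint(0, 100, (2, 7))  # (N, S) token ids
    _x = _pos(_emb(_tokens))                 # (N, S, 32)
    print(_x.shape)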
| EXA-1-master | exa/models/valle/vall-e-main/valle/modules/embedding.py |
| EXA-1-master | exa/models/valle/vall-e-main/valle/modules/__init__.py |
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
from torch.nn import functional as F
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
from torch.nn.parameter import Parameter
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces as described in the paper:
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
Multi-Head Attention is defined as:
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
``forward()`` will use a special optimized implementation if all of the following
conditions are met:
- self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
restriction will be loosened in the future.)
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
- training is disabled (using ``.eval()``)
- dropout is 0
- ``add_bias_kv`` is ``False``
- ``add_zero_attn`` is ``False``
- ``batch_first`` is ``True`` and the input is batched
- ``kdim`` and ``vdim`` are equal to ``embed_dim``
- at most one of ``key_padding_mask`` or ``attn_mask`` is passed
- if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
nor ``attn_mask`` is passed
If the optimized implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
``query``/``key``/``value`` to represent padding more efficiently than using a
padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
will be returned, and an additional speedup proportional to the fraction of the input
that is padding can be expected.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Examples::
>>> # xdoctest: +SKIP
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ["batch_first"]
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = (
self.kdim == embed_dim and self.vdim == embed_dim
)
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
if not self._qkv_same_embed_dim:
self.q_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.k_proj_weight = Parameter(
torch.empty((embed_dim, self.kdim), **factory_kwargs)
)
self.v_proj_weight = Parameter(
torch.empty((embed_dim, self.vdim), **factory_kwargs)
)
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(
torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = Parameter(
torch.empty(3 * embed_dim, **factory_kwargs)
)
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = NonDynamicallyQuantizableLinear(
embed_dim, embed_dim, bias=bias, **factory_kwargs
)
if add_bias_kv:
self.bias_k = Parameter(
torch.empty((1, 1, embed_dim), **factory_kwargs)
)
self.bias_v = Parameter(
torch.empty((1, 1, embed_dim), **factory_kwargs)
)
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if "_qkv_same_embed_dim" not in state:
state["_qkv_same_embed_dim"] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
:math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
:math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
Binary and byte masks are supported.
For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
:math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
corresponding position is not allowed to attend. For a float mask, the mask values will be added to
the attention weight.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
:math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
"""
is_batched = query.dim() == 3
if key_padding_mask is not None:
_kpm_dtype = key_padding_mask.dtype
if _kpm_dtype != torch.bool and not torch.is_floating_point(
key_padding_mask
):
raise AssertionError(
"only bool and floating types of key_padding_mask are supported"
)
why_not_fast_path = ""
if not is_batched:
why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
elif query is not key or key is not value:
# When lifting this restriction, don't forget to either
# enforce that the dtypes all match or test cases where
# they don't!
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
elif (
self.in_proj_bias is not None
and query.dtype != self.in_proj_bias.dtype
):
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
elif (
self.in_proj_weight is not None
and query.dtype != self.in_proj_weight.dtype
):
# this case will fail anyway, but at least they'll get a useful error message.
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
elif self.training:
why_not_fast_path = "training is enabled"
elif not self.batch_first:
why_not_fast_path = "batch_first was not True"
elif self.bias_k is not None:
why_not_fast_path = "self.bias_k was not None"
elif self.bias_v is not None:
why_not_fast_path = "self.bias_v was not None"
elif self.dropout:
why_not_fast_path = f"dropout was {self.dropout}, required zero"
elif self.add_zero_attn:
why_not_fast_path = "add_zero_attn was enabled"
elif not self._qkv_same_embed_dim:
why_not_fast_path = "_qkv_same_embed_dim was not True"
elif attn_mask is not None:
why_not_fast_path = "attn_mask was not None"
elif query.is_nested and key_padding_mask is not None:
why_not_fast_path = (
"key_padding_mask is not supported with NestedTensor input"
)
elif self.num_heads % 2 == 1:
why_not_fast_path = "num_heads is odd"
elif torch.is_autocast_enabled():
why_not_fast_path = "autocast is enabled"
if not why_not_fast_path:
tensor_args = (
query,
key,
value,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_fast_path = "some Tensor argument has_torch_function"
elif not all(
[
(x is None or x.is_cuda or "cpu" in str(x.device))
for x in tensor_args
]
):
why_not_fast_path = (
"some Tensor argument is neither CUDA nor CPU"
)
elif torch.is_grad_enabled() and any(
[x is not None and x.requires_grad for x in tensor_args]
):
why_not_fast_path = (
"grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad"
)
if not why_not_fast_path:
return torch._native_multi_head_attention(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
key_padding_mask
if key_padding_mask is not None
else attn_mask,
need_weights,
average_attn_weights,
1
if key_padding_mask is not None
else 0
if attn_mask is not None
else None,
)
any_nested = query.is_nested or key.is_nested or value.is_nested
assert not any_nested, (
"MultiheadAttention does not support NestedTensor outside of its fast path. "
+ f"The fast path was not hit because {why_not_fast_path}"
)
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = [x.transpose(1, 0) for x in (query, key)]
value = key
else:
query, key, value = [
x.transpose(1, 0) for x in (query, key, value)
]
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight,
average_attn_weights=average_attn_weights,
)
else:
attn_output, attn_output_weights = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
average_attn_weights=average_attn_weights,
)
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
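if __name__ == "__main__":
    # A minimal self-attention sketch (not part of the original file): this
    # class keeps the torch.nn.MultiheadAttention interface, so batched
    # (N, L, E) inputs work with batch_first=True.
    _mha = MultiheadAttention(embed_dim=16, num_heads=4, batch_first=True)
    _x = torch.randn(2, 5, 16)
    _out, _weights = _mha(_x, _x, _x, need_weights=True)
    print(_out.shape, _weights.shape)  # (2, 5, 16) and (2, 5, 5)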
| EXA-1-master | exa/models/valle/vall-e-main/valle/modules/activation.py |
import copy
import numbers
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from .activation import MultiheadAttention
_shape_t = Union[int, List[int], torch.Size]
class LayerNorm(nn.Module):
__constants__ = ["normalized_shape", "eps", "elementwise_affine"]
normalized_shape: Tuple[int, ...]
eps: float
elementwise_affine: bool
def __init__(
self,
normalized_shape: _shape_t,
eps: float = 1e-5,
elementwise_affine: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
# mypy error: incompatible types in assignment
normalized_shape = (normalized_shape,) # type: ignore[assignment]
self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(
torch.empty(self.normalized_shape, **factory_kwargs)
)
self.bias = nn.Parameter(
torch.empty(self.normalized_shape, **factory_kwargs)
)
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
if self.elementwise_affine:
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
if isinstance(input, tuple):
input, embedding = input
return (
F.layer_norm(
input,
self.normalized_shape,
self.weight,
self.bias,
self.eps,
),
embedding,
)
assert embedding is None
return F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps
)
def extra_repr(self) -> str:
return (
"{normalized_shape}, eps={eps}, "
"elementwise_affine={elementwise_affine}".format(**self.__dict__)
)
class AdaptiveLayerNorm(nn.Module):
r"""Adaptive Layer Normalization"""
def __init__(self, d_model, norm) -> None:
super(AdaptiveLayerNorm, self).__init__()
self.project_layer = nn.Linear(d_model, 2 * d_model)
self.norm = norm
self.d_model = d_model
self.eps = self.norm.eps
def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
if isinstance(input, tuple):
input, embedding = input
weight, bias = torch.split(
self.project_layer(embedding),
split_size_or_sections=self.d_model,
dim=-1,
)
return (weight * self.norm(input) + bias, embedding)
weight, bias = torch.split(
self.project_layer(embedding),
split_size_or_sections=self.d_model,
dim=-1,
)
return weight * self.norm(input) + bias
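# A usage sketch (illustrative, not part of the original file): AdaptiveLayerNorm
# wraps a plain LayerNorm and predicts a per-stage scale and shift from a stage
# embedding, e.g.
#
#     norm = AdaptiveLayerNorm(d_model=256, norm=LayerNorm(256))
#     x = torch.randn(2, 10, 256)    # (N, T, d_model)
#     stage = torch.randn(1, 256)    # NAR stage embedding
#     y = norm(x, embedding=stage)   # same shape as x
#
# The tuple form norm((x, stage)) is the convention used by the encoder and
# decoder layers below.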
class TransformerEncoderLayer(nn.Module):
__constants__ = ["batch_first", "norm_first"]
def __init__(
self,
d_model: int,
nhead: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5,
batch_first: bool = False,
norm_first: bool = False,
device=None,
dtype=None,
adaptive_layer_norm=False,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(
d_model,
nhead,
dropout=dropout,
batch_first=batch_first,
**factory_kwargs,
)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
activation = _get_activation_fn(activation)
# We can't test self.activation in forward() in TorchScript,
# so stash some information about it instead.
if activation is F.relu or isinstance(activation, torch.nn.ReLU):
self.activation_relu_or_gelu = 1
elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
self.activation_relu_or_gelu = 2
else:
self.activation_relu_or_gelu = 0
self.activation = activation
if adaptive_layer_norm:
norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm1 = AdaptiveLayerNorm(d_model, norm1)
self.norm2 = AdaptiveLayerNorm(d_model, norm2)
else:
self.norm1 = LayerNorm(
d_model, eps=layer_norm_eps, **factory_kwargs
)
self.norm2 = LayerNorm(
d_model, eps=layer_norm_eps, **factory_kwargs
)
def __setstate__(self, state):
super(TransformerEncoderLayer, self).__setstate__(state)
if not hasattr(self, "activation"):
self.activation = F.relu
def forward(
self,
src: Tensor,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
) -> Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
if src_key_padding_mask is not None:
_skpm_dtype = src_key_padding_mask.dtype
if _skpm_dtype != torch.bool and not torch.is_floating_point(
src_key_padding_mask
):
raise AssertionError(
"only bool and floating types of key_padding_mask are supported"
)
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
# why_not_sparsity_fast_path = "TODO:"
x, stage_embedding = src
if self.norm_first:
x = x + self._sa_block(
self.norm1(x, stage_embedding),
src_mask,
src_key_padding_mask,
)
x = x + self._ff_block(self.norm2(x, stage_embedding))
else:
x = self.norm1(
x + self._sa_block(x, src_mask, src_key_padding_mask),
stage_embedding,
)
x = self.norm2(x + self._ff_block(x), stage_embedding)
return (x, stage_embedding)
# self-attention block
def _sa_block(
self,
x: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
) -> Tensor:
x = self.self_attn(
x,
x,
x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False,
)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
class TransformerEncoder(nn.Module):
r"""TransformerEncoder is a stack of N encoder layers. Users can build the
    BERT (https://arxiv.org/abs/1810.04805) model with corresponding parameters.
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
enable_nested_tensor: if True, input will automatically convert to nested tensor
(and convert back on output). This will improve the overall performance of
TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
Examples::
>>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
__constants__ = ["norm"]
def __init__(self, encoder_layer, num_layers, norm=None):
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
src: Tensor,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
return_layer_states: bool = False,
) -> Tensor:
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
return_layer_states: return layers' state (optional).
Shape:
see the docs in Transformer class.
"""
if return_layer_states:
output = src
for mod in self.layers:
output = mod(
output,
src_mask=mask,
src_key_padding_mask=src_key_padding_mask,
)
if self.norm is not None:
output = self.norm(output)
return output
output = src
for mod in self.layers:
output = mod(
output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
)
if self.norm is not None:
output = self.norm(output)
return output
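# A usage sketch (illustrative, not part of the original file): the layers in
# this module pass a (hidden_states, stage_embedding) tuple through the stack,
# e.g.
#
#     layer = TransformerEncoderLayer(
#         d_model=32, nhead=4, batch_first=True, norm_first=True,
#         adaptive_layer_norm=True,
#     )
#     encoder = TransformerEncoder(layer, num_layers=2, norm=LayerNorm(32))
#     x = torch.randn(2, 9, 32)      # (N, T, d_model)
#     stage = torch.randn(1, 32)     # stage embedding for AdaLN
#     out, _ = encoder((x, stage))   # out: (2, 9, 32)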
class TransformerDecoderLayer(nn.Module):
__constants__ = ["batch_first", "norm_first"]
def __init__(
self,
d_model: int,
nhead: int,
dim_feedforward: int = 2048,
dropout: float = 0.1,
activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
layer_norm_eps: float = 1e-5,
batch_first: bool = False,
norm_first: bool = False,
device=None,
dtype=None,
adaptive_layer_norm=False,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(TransformerDecoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(
d_model,
nhead,
dropout=dropout,
batch_first=batch_first,
**factory_kwargs,
)
self.multihead_attn = nn.MultiheadAttention(
d_model,
nhead,
dropout=dropout,
batch_first=batch_first,
**factory_kwargs,
)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
self.norm_first = norm_first
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
# Legacy string support for activation function.
if isinstance(activation, str):
self.activation = _get_activation_fn(activation)
else:
self.activation = activation
if adaptive_layer_norm:
norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
norm3 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
self.norm1 = AdaptiveLayerNorm(d_model, norm1)
self.norm2 = AdaptiveLayerNorm(d_model, norm2)
self.norm3 = AdaptiveLayerNorm(d_model, norm3)
else:
self.norm1 = LayerNorm(
d_model, eps=layer_norm_eps, **factory_kwargs
)
self.norm2 = LayerNorm(
d_model, eps=layer_norm_eps, **factory_kwargs
)
self.norm3 = LayerNorm(
d_model, eps=layer_norm_eps, **factory_kwargs
)
def forward(
self,
tgt: Tensor,
memory: Tensor,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
) -> Tensor:
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
x, stage_embedding = tgt
if self.norm_first:
x = x + self._sa_block(
self.norm1(x, stage_embedding), tgt_mask, tgt_key_padding_mask
)
x = x + self._mha_block(
self.norm2(x, stage_embedding),
memory,
memory_mask,
memory_key_padding_mask,
)
x = x + self._ff_block(self.norm3(x, stage_embedding))
else:
x = self.norm1(
x + self._sa_block(x, tgt_mask, tgt_key_padding_mask),
stage_embedding,
)
x = self.norm2(
x
+ self._mha_block(
x, memory, memory_mask, memory_key_padding_mask
),
stage_embedding,
)
x = self.norm3(x + self._ff_block(x), stage_embedding)
return (x, stage_embedding)
# self-attention block
def _sa_block(
self,
x: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
) -> Tensor:
x = self.self_attn(
x,
x,
x,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False,
)[0]
return self.dropout1(x)
# multihead attention block
def _mha_block(
self,
x: Tensor,
mem: Tensor,
attn_mask: Optional[Tensor],
key_padding_mask: Optional[Tensor],
) -> Tensor:
x = self.multihead_attn(
x,
mem,
mem,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=False,
)[0]
return self.dropout2(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout3(x)
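# --- Hedged usage sketch (not part of the original module) ------------------
# Like the encoder layer, this decoder layer expects `tgt` to be an
# (x, stage_embedding) tuple and returns the same tuple structure.  Assumption:
# the module-level LayerNorm used above accepts an optional embedding argument
# (it is called with two arguments in forward()), so stage_embedding may be
# None with the default adaptive_layer_norm=False.  Illustrative only.
def _example_decoder_layer_forward():
    layer = TransformerDecoderLayer(d_model=16, nhead=2, batch_first=True)
    x = torch.randn(2, 5, 16)       # target sequence, (batch, time, d_model)
    memory = torch.randn(2, 7, 16)  # encoder output, (batch, src_time, d_model)
    out, _ = layer((x, None), memory)  # pass a stage embedding when adaptive_layer_norm=True
    return out.shape  # torch.Size([2, 5, 16])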
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError(
"activation should be relu/gelu, not {}".format(activation)
)
| EXA-1-master | exa/models/valle/vall-e-main/valle/modules/transformer.py |
#!/usr/bin/env python3
# Copyright 2023 (authors: Feiteng Li)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List

import torch
from valle.modules.optim import Eden
def calc_lr(step, dim_embed, warmup_steps):
return dim_embed ** (-0.5) * min(
step ** (-0.5), step * warmup_steps ** (-1.5)
)
class NoamScheduler(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
base_lr: float,
optimizer: torch.optim.Optimizer,
dim_embed: int,
warmup_steps: int,
last_epoch: int = -1,
verbose: bool = False,
) -> None:
self.dim_embed = dim_embed
self.base_lr = base_lr
self.warmup_steps = warmup_steps
self.num_param_groups = len(optimizer.param_groups)
super().__init__(optimizer, last_epoch, verbose)
    def get_lr(self) -> List[float]:
lr = self.base_lr * calc_lr(
self._step_count, self.dim_embed, self.warmup_steps
)
return [lr] * self.num_param_groups
def set_step(self, step: int):
self._step_count = step
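# --- Hedged usage sketch (not part of the original module) ------------------
# NoamScheduler reproduces the "Attention Is All You Need" schedule: the lr
# factor grows roughly linearly for `warmup_steps` steps and then decays as
# step**-0.5, peaking near dim_embed**-0.5 * warmup_steps**-0.5 (about 2.2e-3
# for dim_embed=1024, warmup_steps=200).  A minimal sketch:
def _example_noam_scheduler():
    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)  # lr is driven by the scheduler
    scheduler = NoamScheduler(
        base_lr=1.0, optimizer=optimizer, dim_embed=1024, warmup_steps=200
    )
    lrs = []
    for _ in range(400):
        optimizer.step()
        scheduler.step()
        lrs.append(scheduler.get_last_lr()[0])
    return max(lrs)  # roughly 1024 ** -0.5 * 200 ** -0.5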
def get_scheduler(params, optimizer):
if params.scheduler_name.lower() == "eden":
scheduler = Eden(optimizer, 5000, 4, warmup_batches=params.warmup_steps)
elif params.scheduler_name.lower() == "noam":
scheduler = NoamScheduler(
params.base_lr,
optimizer,
params.decoder_dim,
warmup_steps=params.warmup_steps,
)
# scheduler.set_step(params.start_batch or params.batch_idx_train)
elif params.scheduler_name.lower() == "cosine":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            params.warmup_steps,
            eta_min=params.base_lr,
        )
else:
raise NotImplementedError(f"{params.scheduler_name}")
return scheduler
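# --- Hedged usage sketch (not part of the original module) ------------------
# get_scheduler only reads params.scheduler_name, params.base_lr,
# params.decoder_dim and params.warmup_steps, so any attribute container works;
# SimpleNamespace below is purely illustrative (in training, `params` is
# typically the parsed command-line options).
def _example_get_scheduler():
    from types import SimpleNamespace
    params = SimpleNamespace(
        scheduler_name="noam", base_lr=0.05, decoder_dim=1024, warmup_steps=200
    )
    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.AdamW(model.parameters(), lr=params.base_lr)
    return get_scheduler(params, optimizer)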
| EXA-1-master | exa/models/valle/vall-e-main/valle/modules/scheduler.py |
# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import random
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import torch
from lhotse.utils import fix_random_seed
from torch import Tensor
from torch.optim import Optimizer
class BatchedOptimizer(Optimizer):
"""
This class adds to class Optimizer the capability to optimize parameters in batches:
it will stack the parameters and their grads for you so the optimizer can work
on tensors with an extra leading dimension. This is intended for speed with GPUs,
as it reduces the number of kernels launched in the optimizer.
Args:
params:
"""
def __init__(self, params, defaults):
super(BatchedOptimizer, self).__init__(params, defaults)
@contextlib.contextmanager
def batched_params(self, param_group, group_params_names):
"""
        This function returns (technically, yields) a list of
        tuples (p, state, p_names), where
p is a `fake` parameter that is stacked (over axis 0) from real parameters
that share the same shape, and its gradient is also stacked;
`state` is the state corresponding to this batch of parameters
(it will be physically located in the "state" for one of the real
parameters, the last one that has any particular shape and dtype).
This function is decorated as a context manager so that it can
write parameters back to their "real" locations.
The idea is, instead of doing:
<code>
for p in group["params"]:
state = self.state[p]
...
</code>
you can do:
<code>
          with self.batched_params(group["params"], group_params_names) as batches:
for p, state, p_names in batches:
...
</code>
Args:
group: a parameter group, which is a list of parameters; should be
one of self.param_groups.
group_params_names: name for each parameter in group,
which is List[str].
"""
batches = defaultdict(
list
) # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
        batches_names = defaultdict(
            list
        )  # `batches_names` maps from tuple (dtype_as_str,*shape) to list of str
assert len(param_group) == len(group_params_names)
for p, named_p in zip(param_group, group_params_names):
key = (str(p.dtype), *p.shape)
batches[key].append(p)
batches_names[key].append(named_p)
batches_names_keys = list(batches_names.keys())
sorted_idx = sorted(
range(len(batches_names)), key=lambda i: batches_names_keys[i]
)
batches_names = [
batches_names[batches_names_keys[idx]] for idx in sorted_idx
]
batches = [batches[batches_names_keys[idx]] for idx in sorted_idx]
stacked_params_dict = dict()
# turn batches into a list, in deterministic order.
# tuples will contain tuples of (stacked_param, state, stacked_params_names),
# one for each batch in `batches`.
tuples = []
for batch, batch_names in zip(batches, batches_names):
p = batch[0]
# we arbitrarily store the state in the
# state corresponding to the 1st parameter in the
# group. class Optimizer will take care of saving/loading state.
state = self.state[p]
p_stacked = torch.stack(batch)
grad = torch.stack(
[
torch.zeros_like(p) if p.grad is None else p.grad
for p in batch
]
)
p_stacked.grad = grad
            key = (str(p.dtype), *p.shape)
            stacked_params_dict[key] = p_stacked
tuples.append((p_stacked, state, batch_names))
yield tuples # <-- calling code will do the actual optimization here!
for ((stacked_params, _state, _names), batch) in zip(tuples, batches):
for i, p in enumerate(batch): # batch is list of Parameter
p.copy_(stacked_params[i])
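# --- Hedged illustration (not part of the original module) ------------------
# Sketch of what batched_params yields: every parameter sharing a
# (dtype, shape) key is stacked along a new leading dim, so a subclass sees one
# "fake" parameter per shape.  ScaledAdam (defined below) is used only because
# BatchedOptimizer itself defines no step(); torch.no_grad() is needed because
# the context manager copies values back into leaf parameters on exit.
def _example_batched_params():
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 4))
    names = [[name for name, _ in model.named_parameters()]]
    optim = ScaledAdam(model.parameters(), lr=0.03, parameters_names=names)
    group = optim.param_groups[0]
    with torch.no_grad():
        with optim.batched_params(group["params"], names[0]) as batches:
            # two batches: the two (4, 4) weights and the two (4,) biases
            shapes = [(tuple(p.shape), list(ns)) for p, _state, ns in batches]
    return shapes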
class ScaledAdam(BatchedOptimizer):
"""
Implements 'Scaled Adam', a variant of Adam where we scale each parameter's update
proportional to the norm of that parameter; and also learn the scale of the parameter,
in log space, subject to upper and lower limits (as if we had factored each parameter as
param = underlying_param * log_scale.exp())
Args:
params: The parameters or param_groups to optimize (like other Optimizer subclasses)
lr: The learning rate. We will typically use a learning rate schedule that starts
at 0.03 and decreases over time, i.e. much higher than other common
optimizers.
clipping_scale: (e.g. 2.0)
A scale for gradient-clipping: if specified, the normalized gradients
over the whole model will be clipped to have 2-norm equal to
`clipping_scale` times the median 2-norm over the most recent period
of `clipping_update_period` minibatches. By "normalized gradients",
we mean after multiplying by the rms parameter value for this tensor
[for non-scalars]; this is appropriate because our update is scaled
by this quantity.
betas: beta1,beta2 are momentum constants for regular momentum, and moving sum-sq grad.
               Must satisfy 0 < beta1 <= beta2 < 1.
     scalar_lr_scale: A scaling factor on the learning rate, that we use to update the
               scale of each parameter tensor and scalar parameters of the model.
               If each parameter were decomposed
               as p * p_scale.exp(), where (p**2).mean().sqrt() == 1.0, scalar_lr_scale
               would be the scaling factor on the learning rate of p_scale.
eps: A general-purpose epsilon to prevent division by zero
param_min_rms: Minimum root-mean-square value of parameter tensor, for purposes of
learning the scale on the parameters (we'll constrain the rms of each non-scalar
parameter tensor to be >= this value)
param_max_rms: Maximum root-mean-square value of parameter tensor, for purposes of
learning the scale on the parameters (we'll constrain the rms of each non-scalar
parameter tensor to be <= this value)
scalar_max: Maximum absolute value for scalar parameters (applicable if your
model has any parameters with numel() == 1).
size_update_period: The periodicity, in steps, with which we update the size (scale)
of the parameter tensor. This is provided to save a little time
in the update.
     clipping_update_period: if clipping_scale is specified, this is the period (in steps)
               with which we recompute the gradient-norm threshold used for clipping.
"""
def __init__(
self,
params,
lr=3e-02,
clipping_scale=None,
betas=(0.9, 0.98),
scalar_lr_scale=0.1,
eps=1.0e-08,
param_min_rms=1.0e-05,
param_max_rms=3.0,
scalar_max=10.0,
size_update_period=4,
clipping_update_period=100,
parameters_names=None,
show_dominant_parameters=True,
):
        assert parameters_names is not None, (
            "Please prepare parameters_names, "
            "which is a List[List[str]]. Each List[str] is for a group, "
            "and each str is for a parameter"
        )
defaults = dict(
lr=lr,
clipping_scale=clipping_scale,
betas=betas,
scalar_lr_scale=scalar_lr_scale,
eps=eps,
param_min_rms=param_min_rms,
param_max_rms=param_max_rms,
scalar_max=scalar_max,
size_update_period=size_update_period,
clipping_update_period=clipping_update_period,
)
super(ScaledAdam, self).__init__(params, defaults)
assert len(self.param_groups) == len(parameters_names)
self.parameters_names = parameters_names
self.show_dominant_parameters = show_dominant_parameters
def __setstate__(self, state):
super(ScaledAdam, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
batch = True
for group, group_params_names in zip(
self.param_groups, self.parameters_names
):
with self.batched_params(
group["params"], group_params_names
) as batches:
                # batches is a list of tuples (stacked_param, state, param_names).
                # stacked_param is like a regular parameter, and will have a .grad,
                # but the 1st dim corresponds to a stacking dim, it is not a real dim.
if (
len(batches[0][1]) == 0
): # if len(first state) == 0: not yet initialized
clipping_scale = 1
else:
clipping_scale = self._get_clipping_scale(group, batches)
for p, state, _ in batches:
# Perform optimization step.
# grad is not going to be None, we handled that when creating the batches.
grad = p.grad
if grad.is_sparse:
raise RuntimeError(
"ScaledAdam optimizer does not support sparse gradients"
)
# State initialization
if len(state) == 0:
self._init_state(group, p, state)
self._step_one_batch(group, p, state, clipping_scale)
return loss
def _init_state(self, group: dict, p: Tensor, state: dict):
"""
Initializes state dict for parameter 'p'. Assumes that dim 0 of tensor p
is actually the batch dimension, corresponding to batched-together
parameters of a given shape.
Args:
group: Dict to look up configuration values.
p: The parameter that we are initializing the state for
state: Dict from string to whatever state we are initializing
"""
size_update_period = group["size_update_period"]
state["step"] = 0
kwargs = {"device": p.device, "dtype": p.dtype}
# 'delta' implements conventional momentum. There are
# several different kinds of update going on, so rather than
# compute "exp_avg" like in Adam, we store and decay a
# parameter-change "delta", which combines all forms of
# update. this is equivalent to how it's done in Adam,
# except for the first few steps.
state["delta"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
        batch_size = p.shape[0]
        # per-parameter numel (dim 0 is the stacking/batch dim)
        numel = p.numel() // batch_size
if numel > 1:
# "param_rms" just periodically records the scalar root-mean-square value of
# the parameter tensor.
# it has a shape like (batch_size, 1, 1, 1, 1)
param_rms = (
(p ** 2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt()
)
state["param_rms"] = param_rms
state["scale_exp_avg_sq"] = torch.zeros_like(param_rms)
state["scale_grads"] = torch.zeros(
size_update_period, *param_rms.shape, **kwargs
)
# exp_avg_sq is the weighted sum of scaled gradients. as in Adam.
state["exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
def _get_clipping_scale(
self, group: dict, tuples: List[Tuple[Tensor, dict, List[str]]]
) -> float:
"""
Returns a scalar factor <= 1.0 that dictates gradient clipping, i.e. we will scale the gradients
by this amount before applying the rest of the update.
Args:
group: the parameter group, an item in self.param_groups
tuples: a list of tuples of (param, state, param_names)
where param is a batched set of parameters,
with a .grad (1st dim is batch dim)
and state is the state-dict where optimization parameters are kept.
param_names is a List[str] while each str is name for a parameter
in batched set of parameters "param".
"""
assert len(tuples) >= 1
clipping_scale = group["clipping_scale"]
(first_p, first_state, _) = tuples[0]
step = first_state["step"]
if clipping_scale is None or step == 0:
# no clipping. return early on step == 0 because the other
# parameters' state won't have been initialized yet.
return 1.0
clipping_update_period = group["clipping_update_period"]
tot_sumsq = torch.tensor(0.0, device=first_p.device)
for (p, state, param_names) in tuples:
grad = p.grad
if grad.is_sparse:
raise RuntimeError(
"ScaledAdam optimizer does not support sparse gradients"
)
if p.numel() == p.shape[0]: # a batch of scalars
tot_sumsq += (
grad ** 2
).sum() # sum() to change shape [1] to []
else:
tot_sumsq += ((grad * state["param_rms"]) ** 2).sum()
tot_norm = tot_sumsq.sqrt()
if "model_norms" not in first_state:
first_state["model_norms"] = torch.zeros(
clipping_update_period, device=p.device
)
first_state["model_norms"][step % clipping_update_period] = tot_norm
if step % clipping_update_period == 0:
# Print some stats.
# We don't reach here if step == 0 because we would have returned
# above.
sorted_norms = first_state["model_norms"].sort()[0].to("cpu")
quartiles = []
for n in range(0, 5):
index = min(
clipping_update_period - 1,
(clipping_update_period // 4) * n,
)
quartiles.append(sorted_norms[index].item())
median = quartiles[2]
threshold = clipping_scale * median
first_state["model_norm_threshold"] = threshold
percent_clipped = (
first_state["num_clipped"] * 100.0 / clipping_update_period
if "num_clipped" in first_state
else 0.0
)
first_state["num_clipped"] = 0
quartiles = " ".join(["%.3e" % x for x in quartiles])
logging.info(
f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, "
f"threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}"
)
if step < clipping_update_period:
return 1.0 # We have not yet estimated a norm to clip to.
else:
try:
model_norm_threshold = first_state["model_norm_threshold"]
except KeyError:
logging.info(
"Warning: model_norm_threshold not in state: possibly "
"you changed config when restarting, adding clipping_scale option?"
)
return 1.0
ans = min(1.0, (model_norm_threshold / (tot_norm + 1.0e-20)).item())
if ans < 1.0:
first_state["num_clipped"] += 1
if ans < 0.1:
logging.warn(
f"Scaling gradients by {ans}, model_norm_threshold={model_norm_threshold}"
)
if self.show_dominant_parameters:
assert p.shape[0] == len(param_names)
self._show_gradient_dominating_parameter(tuples, tot_sumsq)
return ans
def _show_gradient_dominating_parameter(
self, tuples: List[Tuple[Tensor, dict, List[str]]], tot_sumsq: Tensor
):
"""
        Show information about the parameter which dominates tot_sumsq.
Args:
tuples: a list of tuples of (param, state, param_names)
where param is a batched set of parameters,
with a .grad (1st dim is batch dim)
and state is the state-dict where optimization parameters are kept.
param_names is a List[str] while each str is name for a parameter
in batched set of parameters "param".
            tot_sumsq: sumsq of all parameters. Though it could be calculated
                from tuples, we still pass it in to save some time.
"""
all_sumsq_orig = {}
for (p, state, batch_param_names) in tuples:
# p is a stacked batch parameters.
batch_grad = p.grad
if p.numel() == p.shape[0]: # a batch of scalars
batch_sumsq_orig = batch_grad ** 2
                # Dummy values used by the following `zip` statement.
batch_rms_orig = torch.ones(p.shape[0])
else:
batch_rms_orig = state["param_rms"]
batch_sumsq_orig = ((batch_grad * batch_rms_orig) ** 2).sum(
dim=list(range(1, batch_grad.ndim))
)
for name, sumsq_orig, rms, grad in zip(
batch_param_names, batch_sumsq_orig, batch_rms_orig, batch_grad
):
proportion_orig = sumsq_orig / tot_sumsq
all_sumsq_orig[name] = (proportion_orig, sumsq_orig, rms, grad)
assert torch.isclose(
sum([value[0] for value in all_sumsq_orig.values()]).cpu(),
torch.tensor(1.0),
)
sorted_by_proportion = {
k: v
for k, v in sorted(
all_sumsq_orig.items(),
key=lambda item: item[1][0],
reverse=True,
)
}
dominant_param_name = next(iter(sorted_by_proportion))
(
dominant_proportion,
dominant_sumsq,
dominant_rms,
dominant_grad,
) = sorted_by_proportion[dominant_param_name]
logging.info(
f"Parameter Dominanting tot_sumsq {dominant_param_name}"
f" with proportion {dominant_proportion:.2f},"
f" where dominant_sumsq=(grad_sumsq*orig_rms_sq)"
f"={dominant_sumsq:.3e},"
f" grad_sumsq = {(dominant_grad**2).sum():.3e},"
f" orig_rms_sq={(dominant_rms**2).item():.3e}"
)
def _step_one_batch(
self, group: dict, p: Tensor, state: dict, clipping_scale: float
):
"""
Do the step for one parameter, which is actually going to be a batch of
`real` parameters, with dim 0 as the batch dim.
Args:
group: dict to look up configuration values
p: parameter to update (actually multiple parameters stacked together
as a batch)
          state: state-dict for p, to look up the optimizer state
          clipping_scale: a scale factor (<= 1.0) by which the gradients are
               multiplied before the update (1.0 means no clipping)
        """
lr = group["lr"]
size_update_period = group["size_update_period"]
beta1 = group["betas"][0]
grad = p.grad
if clipping_scale != 1.0:
grad = grad * clipping_scale
step = state["step"]
delta = state["delta"]
delta.mul_(beta1)
batch_size = p.shape[0]
numel = p.numel() // batch_size
if numel > 1:
# Update the size/scale of p, and set param_rms
scale_grads = state["scale_grads"]
scale_grads[step % size_update_period] = (p * grad).sum(
dim=list(range(1, p.ndim)), keepdim=True
)
if step % size_update_period == size_update_period - 1:
param_rms = state["param_rms"] # shape: (batch_size, 1, 1, ..)
param_rms.copy_(
(p ** 2)
.mean(dim=list(range(1, p.ndim)), keepdim=True)
.sqrt()
)
if step > 0:
# self._size_update() learns the overall scale on the
# parameter, by shrinking or expanding it.
self._size_update(group, scale_grads, p, state)
if numel == 1:
# For parameters with 1 element we just use regular Adam.
# Updates delta.
self._step_scalar(group, p, state)
else:
self._step(group, p, state)
state["step"] = step + 1
def _size_update(
self, group: dict, scale_grads: Tensor, p: Tensor, state: dict
) -> None:
"""
Called only where p.numel() > 1, this updates the scale of the parameter.
If we imagine: p = underlying_param * scale.exp(), and we are doing
gradient descent on underlying param and on scale, this function does the update
on `scale`.
Args:
group: dict to look up configuration values
scale_grads: a tensor of shape (size_update_period, batch_size, 1, 1,...) containing
grads w.r.t. the scales.
p: The parameter to update
state: The state-dict of p
"""
param_rms = state["param_rms"]
beta1, beta2 = group["betas"]
size_lr = group["lr"] * group["scalar_lr_scale"]
param_min_rms = group["param_min_rms"]
param_max_rms = group["param_max_rms"]
eps = group["eps"]
step = state["step"]
batch_size = p.shape[0]
size_update_period = scale_grads.shape[0]
# correct beta2 for the size update period: we will have
# faster decay at this level.
beta2_corr = beta2 ** size_update_period
scale_exp_avg_sq = state[
"scale_exp_avg_sq"
] # shape: (batch_size, 1, 1, ..)
scale_exp_avg_sq.mul_(beta2_corr).add_(
(scale_grads ** 2).mean(
dim=0
), # mean over dim `size_update_period`
alpha=1 - beta2_corr,
) # shape is (batch_size, 1, 1, ...)
# The 1st time we reach here is when size_step == 1.
size_step = (step + 1) // size_update_period
bias_correction2 = 1 - beta2_corr ** size_step
# we don't bother with bias_correction1; this will help prevent divergence
# at the start of training.
denom = scale_exp_avg_sq.sqrt() + eps
scale_step = (
-size_lr
* (bias_correction2 ** 0.5)
* scale_grads.sum(dim=0)
/ denom
)
is_too_small = param_rms < param_min_rms
is_too_large = param_rms > param_max_rms
# when the param gets too small, just don't shrink it any further.
scale_step.masked_fill_(is_too_small, 0.0)
# when it gets too large, stop it from getting any larger.
scale_step.masked_fill_(is_too_large, -size_lr * size_update_period)
delta = state["delta"]
# the factor of (1-beta1) relates to momentum.
delta.add_(p * scale_step, alpha=(1 - beta1))
def _step(self, group: dict, p: Tensor, state: dict):
"""
This function does the core update of self.step(), in the case where the members of
the batch have more than 1 element.
Args:
group: A dict which will be used to look up configuration values
          p: The parameter to be updated (its .grad attribute supplies the gradient)
          state: The state-dict corresponding to parameter p
This function modifies p.
"""
grad = p.grad
lr = group["lr"]
beta1, beta2 = group["betas"]
eps = group["eps"]
param_min_rms = group["param_min_rms"]
step = state["step"]
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
this_step = state["step"] - (
state["zero_step"] if "zero_step" in state else 0
)
bias_correction2 = 1 - beta2 ** (this_step + 1)
if bias_correction2 < 0.99:
# note: not in-place.
exp_avg_sq = exp_avg_sq * (1.0 / bias_correction2)
denom = exp_avg_sq.sqrt()
denom += eps
grad = grad / denom
alpha = -lr * (1 - beta1) * state["param_rms"].clamp(min=param_min_rms)
delta = state["delta"]
delta.add_(grad * alpha)
p.add_(delta)
def _step_scalar(self, group: dict, p: Tensor, state: dict):
"""
A simplified form of the core update for scalar tensors, where we cannot get a good
estimate of the parameter rms.
"""
beta1, beta2 = group["betas"]
scalar_max = group["scalar_max"]
eps = group["eps"]
lr = group["lr"] * group["scalar_lr_scale"]
grad = p.grad
exp_avg_sq = state["exp_avg_sq"] # shape: (batch_size,)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# bias_correction2 is like in Adam. Don't bother with bias_correction1;
# slower update at the start will help stability anyway.
bias_correction2 = 1 - beta2 ** (state["step"] + 1)
denom = (exp_avg_sq / bias_correction2).sqrt() + eps
delta = state["delta"]
delta.add_(grad / denom, alpha=-lr * (1 - beta1))
p.clamp_(min=-scalar_max, max=scalar_max)
p.add_(delta)
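# --- Hedged usage sketch (not part of the original module) ------------------
# ScaledAdam requires `parameters_names`, a List[List[str]] parallel to the
# parameter groups.  A minimal single-group setup, assuming the usual pairing
# of model.parameters() with model.named_parameters():
def _example_scaled_adam_step():
    model = torch.nn.Linear(16, 16)
    names = [[name for name, _ in model.named_parameters()]]
    optim = ScaledAdam(
        model.parameters(), lr=0.03, clipping_scale=2.0, parameters_names=names
    )
    loss = (model(torch.randn(8, 16)) ** 2).mean()
    loss.backward()
    optim.step()
    optim.zero_grad()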
class LRScheduler(object):
"""
Base-class for learning rate schedulers where the learning-rate depends on both the
batch and the epoch.
"""
def __init__(self, optimizer: Optimizer, verbose: bool = False):
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(
"{} is not an Optimizer".format(type(optimizer).__name__)
)
self.optimizer = optimizer
self.verbose = verbose
for group in optimizer.param_groups:
group.setdefault("base_lr", group["lr"])
self.base_lrs = [group["base_lr"] for group in optimizer.param_groups]
self.epoch = 0
self.batch = 0
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {
"base_lrs": self.base_lrs,
"epoch": self.epoch,
"batch": self.batch,
}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_last_lr(self) -> List[float]:
"""Return last computed learning rate by current scheduler. Will be a list of float."""
return self._last_lr
def get_lr(self):
# Compute list of learning rates from self.epoch and self.batch and
# self.base_lrs; this must be overloaded by the user.
# e.g. return [some_formula(self.batch, self.epoch, base_lr) for base_lr in self.base_lrs ]
raise NotImplementedError
def step_batch(self, batch: Optional[int] = None) -> None:
# Step the batch index, or just set it. If `batch` is specified, it
# must be the batch index from the start of training, i.e. summed over
# all epochs.
# You can call this in any order; if you don't provide 'batch', it should
# of course be called once per batch.
if batch is not None:
self.batch = batch
else:
self.batch = self.batch + 1
self._set_lrs()
def step_epoch(self, epoch: Optional[int] = None):
# Step the epoch index, or just set it. If you provide the 'epoch' arg,
# you should call this at the start of the epoch; if you don't provide the 'epoch'
# arg, you should call it at the end of the epoch.
if epoch is not None:
self.epoch = epoch
else:
self.epoch = self.epoch + 1
self._set_lrs()
def _set_lrs(self):
values = self.get_lr()
assert len(values) == len(self.optimizer.param_groups)
for i, data in enumerate(zip(self.optimizer.param_groups, values)):
param_group, lr = data
param_group["lr"] = lr
self.print_lr(self.verbose, i, lr)
self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
def print_lr(self, is_verbose, group, lr):
"""Display the current learning rate."""
if is_verbose:
logging.info(
f"Epoch={self.epoch}, batch={self.batch}: adjusting learning rate"
f" of group {group} to {lr:.4e}."
)
class Eden(LRScheduler):
"""
Eden scheduler.
The basic formula (before warmup) is:
lr = base_lr * (((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25 *
(((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25)) * warmup
    where `warmup` increases linearly from 0.5 to 1 over `warmup_batches` batches
and then stays constant at 1.
E.g. suggest base_lr = 0.04 (passed to optimizer) if used with ScaledAdam
Args:
optimizer: the optimizer to change the learning rates on
lr_batches: the number of batches after which we start significantly
decreasing the learning rate, suggest 5000.
lr_epochs: the number of epochs after which we start significantly
decreasing the learning rate, suggest 6 if you plan to do e.g.
20 to 40 epochs, but may need smaller number if dataset is huge
and you will do few epochs.
"""
def __init__(
self,
optimizer: Optimizer,
lr_batches: Union[int, float],
lr_epochs: Union[int, float],
warmup_batches: Union[int, float] = 500.0,
verbose: bool = False,
):
super(Eden, self).__init__(optimizer, verbose)
self.lr_batches = lr_batches
self.lr_epochs = lr_epochs
self.warmup_batches = warmup_batches
def get_lr(self):
factor = (
(self.batch ** 2 + self.lr_batches ** 2) / self.lr_batches ** 2
) ** -0.25 * (
((self.epoch ** 2 + self.lr_epochs ** 2) / self.lr_epochs ** 2)
** -0.25
)
warmup_factor = (
1.0
if self.batch >= self.warmup_batches
else 0.5 + 0.5 * (self.batch / self.warmup_batches)
)
return [x * factor * warmup_factor for x in self.base_lrs]
def _test_eden():
    m = torch.nn.Linear(100, 100)
    parameters_names = [[name for name, _ in m.named_parameters()]]
    optim = ScaledAdam(
        m.parameters(), lr=0.03, parameters_names=parameters_names
    )
    scheduler = Eden(optim, lr_batches=100, lr_epochs=2, verbose=True)
for epoch in range(10):
scheduler.step_epoch(epoch) # sets epoch to `epoch`
for step in range(20):
x = torch.randn(200, 100).detach()
x.requires_grad = True
y = m(x)
dy = torch.randn(200, 100).detach()
f = (y * dy).sum()
f.backward()
optim.step()
scheduler.step_batch()
optim.zero_grad()
logging.info(f"last lr = {scheduler.get_last_lr()}")
logging.info(f"state dict = {scheduler.state_dict()}")
# This is included mostly as a baseline for ScaledAdam.
class Eve(Optimizer):
"""
Implements Eve algorithm. This is a modified version of AdamW with a special
way of setting the weight-decay / shrinkage-factor, which is designed to make the
rms of the parameters approach a particular target_rms (default: 0.1). This is
for use with networks with 'scaled' versions of modules (see scaling.py), which
will be close to invariant to the absolute scale on the parameter matrix.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Eve is unpublished so far.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-3,
            matching the constructor default); this value means that the weight
            would decay significantly after about 1k minibatches. Is not
            multiplied by learning rate, but is conditional on RMS-value of
            parameter being > target_rms.
target_rms (float, optional): target root-mean-square value of
parameters, if they fall below this we will stop applying weight decay.
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.98),
eps=1e-8,
weight_decay=1e-3,
target_rms=0.1,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0])
)
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1])
)
if not 0 <= weight_decay <= 0.1:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay)
)
if not 0 < target_rms <= 10.0:
raise ValueError("Invalid target_rms value: {}".format(target_rms))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
target_rms=target_rms,
)
super(Eve, self).__init__(params, defaults)
def __setstate__(self, state):
super(Eve, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError(
"AdamW does not support sparse gradients"
)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = (exp_avg_sq.sqrt() * (bias_correction2 ** -0.5)).add_(
group["eps"]
)
step_size = group["lr"] / bias_correction1
target_rms = group["target_rms"]
weight_decay = group["weight_decay"]
if p.numel() > 1:
# avoid applying this weight-decay on "scaling factors"
# (which are scalar).
is_above_target_rms = p.norm() > (
target_rms * (p.numel() ** 0.5)
)
p.mul_(1 - (weight_decay * is_above_target_rms))
p.addcdiv_(exp_avg, denom, value=-step_size)
# if random.random() < 0.0005:
# step = (exp_avg / denom) * step_size
# logging.info(
# f"Delta rms = {(step**2).mean().item()}, shape = {step.shape}"
# )
return loss
def _test_scaled_adam(hidden_dim: int):
import timeit
from scaling import ScaledLinear
E = 100
B = 4
T = 2
logging.info("in test_eve_cain")
# device = torch.device('cuda')
device = torch.device("cpu")
dtype = torch.float32
fix_random_seed(42)
# these input_magnitudes and output_magnitudes are to test that
# Abel is working as we expect and is able to adjust scales of
# different dims differently.
input_magnitudes = (1.0 * torch.randn(E, dtype=dtype, device=device)).exp()
output_magnitudes = (1.0 * torch.randn(E, dtype=dtype, device=device)).exp()
for iter in [1, 0]:
fix_random_seed(42)
Linear = torch.nn.Linear if iter == 0 else ScaledLinear
m = torch.nn.Sequential(
Linear(E, hidden_dim),
torch.nn.PReLU(),
Linear(hidden_dim, hidden_dim),
torch.nn.PReLU(),
Linear(hidden_dim, E),
).to(device)
train_pairs = [
(
100.0
* torch.randn(B, T, E, device=device, dtype=dtype)
* input_magnitudes,
torch.randn(B, T, E, device=device, dtype=dtype)
* output_magnitudes,
)
for _ in range(20)
]
if iter == 0:
optim = Eve(m.parameters(), lr=0.003)
        elif iter == 1:
            parameters_names = [[name for name, _ in m.named_parameters()]]
            optim = ScaledAdam(
                m.parameters(),
                lr=0.03,
                clipping_scale=2.0,
                parameters_names=parameters_names,
            )
scheduler = Eden(optim, lr_batches=200, lr_epochs=5, verbose=False)
start = timeit.default_timer()
avg_loss = 0.0
for epoch in range(180):
scheduler.step_epoch()
# if epoch == 100 and iter in [2,3]:
# optim.reset_speedup() # check it doesn't crash.
# if epoch == 130:
# opts = diagnostics.TensorDiagnosticOptions(
# 2 ** 22
# ) # allow 4 megabytes per sub-module
# diagnostic = diagnostics.attach_diagnostics(m, opts)
for n, (x, y) in enumerate(train_pairs):
y_out = m(x)
loss = ((y_out - y) ** 2).mean() * 100.0
if epoch == 0 and n == 0:
avg_loss = loss.item()
else:
avg_loss = 0.98 * avg_loss + 0.02 * loss.item()
if n == 0 and epoch % 5 == 0:
# norm1 = '%.2e' % (m[0].weight**2).mean().sqrt().item()
# norm1b = '%.2e' % (m[0].bias**2).mean().sqrt().item()
# norm2 = '%.2e' % (m[2].weight**2).mean().sqrt().item()
# norm2b = '%.2e' % (m[2].bias**2).mean().sqrt().item()
# scale1 = '%.2e' % (m[0].weight_scale.exp().item())
# scale1b = '%.2e' % (m[0].bias_scale.exp().item())
# scale2 = '%.2e' % (m[2].weight_scale.exp().item())
# scale2b = '%.2e' % (m[2].bias_scale.exp().item())
lr = scheduler.get_last_lr()[0]
logging.info(
f"Iter {iter}, epoch {epoch}, batch {n}, avg_loss {avg_loss:.4g}, lr={lr:.4e}"
) # , norms={norm1,norm1b,norm2,norm2b}") # scales={scale1,scale1b,scale2,scale2b}
loss.log().backward()
optim.step()
optim.zero_grad()
scheduler.step_batch()
# diagnostic.print_diagnostics()
stop = timeit.default_timer()
logging.info(f"Iter={iter}, Time taken: {stop - start}")
logging.info(f"last lr = {scheduler.get_last_lr()}")
# logging.info("state dict = ", scheduler.state_dict())
# logging.info("optim state_dict = ", optim.state_dict())
logging.info(f"input_magnitudes = {input_magnitudes}")
logging.info(f"output_magnitudes = {output_magnitudes}")
if __name__ == "__main__":
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
logging.getLogger().setLevel(logging.INFO)
import subprocess
s = subprocess.check_output(
"git status -uno .; git log -1; git diff HEAD .", shell=True
)
logging.info(s)
import sys
if len(sys.argv) > 1:
hidden_dim = int(sys.argv[1])
else:
hidden_dim = 200
_test_scaled_adam(hidden_dim)
_test_eden()
| EXA-1-master | exa/models/valle/vall-e-main/valle/modules/optim.py |
# Copyright 2023 (authors: Feiteng Li)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional, Union
import numpy as np
import torch
from lhotse.features.base import FeatureExtractor
from lhotse.utils import EPSILON, Seconds, compute_num_frames
from librosa.filters import mel as librosa_mel_fn
@dataclass
class BigVGANFbankConfig:
    # Spectrogram-related part
# Note that frame_length and frame_shift will be converted to milliseconds before torchaudio/Kaldi sees them
frame_length: Seconds = 1024 / 24000.0
frame_shift: Seconds = 256 / 24000.0
remove_dc_offset: bool = True
round_to_power_of_two: bool = True
# Fbank-related part
low_freq: float = 0.0
high_freq: float = 12000.0
num_mel_bins: int = 100
use_energy: bool = False
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
@staticmethod
def from_dict(data: Dict[str, Any]) -> "BigVGANFbankConfig":
return BigVGANFbankConfig(**data)
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
# https://github.com/NVIDIA/BigVGAN
# bigvgan_24khz_100band https://drive.google.com/drive/folders/1EpxX6AsxjCbbk0mmAhE0td6eYiABr8Oz
class BigVGANFbank(FeatureExtractor):
name = "fbank"
config_type = BigVGANFbankConfig
def __init__(self, config: Optional[Any] = None):
super(BigVGANFbank, self).__init__(config)
sampling_rate = 24000
self.mel_basis = torch.from_numpy(
            librosa_mel_fn(
                sr=sampling_rate,
                n_fft=1024,
                n_mels=self.config.num_mel_bins,
                fmin=self.config.low_freq,
                fmax=self.config.high_freq,
            ).astype(np.float32)
)
self.hann_window = torch.hann_window(1024)
def _feature_fn(self, samples, **kwargs):
win_length, n_fft = 1024, 1024
hop_size = 256
if True:
sampling_rate = 24000
duration = round(samples.shape[-1] / sampling_rate, ndigits=12)
expected_num_frames = compute_num_frames(
duration=duration,
frame_shift=self.frame_shift,
sampling_rate=sampling_rate,
)
pad_size = (
(expected_num_frames - 1) * hop_size
+ win_length
- samples.shape[-1]
)
assert pad_size >= 0
y = torch.nn.functional.pad(
samples,
(0, pad_size),
mode="constant",
)
else:
y = torch.nn.functional.pad(
samples,
(int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
mode="reflect",
)
y = y.squeeze(1)
# complex tensor as default, then use view_as_real for future pytorch compatibility
spec = torch.stft(
y,
n_fft,
hop_length=hop_size,
win_length=win_length,
window=self.hann_window,
center=False,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
spec = torch.matmul(self.mel_basis, spec)
spec = spectral_normalize_torch(spec)
return spec.transpose(2, 1).squeeze(0)
def extract(
self, samples: Union[np.ndarray, torch.Tensor], sampling_rate: int
) -> np.ndarray:
assert sampling_rate == 24000
params = asdict(self.config)
params.update({"sample_frequency": sampling_rate, "snip_edges": False})
params["frame_shift"] *= 1000.0
params["frame_length"] *= 1000.0
if not isinstance(samples, torch.Tensor):
samples = torch.from_numpy(samples)
# Torchaudio Kaldi feature extractors expect the channel dimension to be first.
if len(samples.shape) == 1:
samples = samples.unsqueeze(0)
features = self._feature_fn(samples, **params).to(torch.float32)
return features.numpy()
@property
def frame_shift(self) -> Seconds:
return self.config.frame_shift
def feature_dim(self, sampling_rate: int) -> int:
return self.config.num_mel_bins
@staticmethod
def mix(
features_a: np.ndarray,
features_b: np.ndarray,
energy_scaling_factor_b: float,
) -> np.ndarray:
return np.log(
np.maximum(
# protection against log(0); max with EPSILON is adequate since these are energies (always >= 0)
EPSILON,
np.exp(features_a)
+ energy_scaling_factor_b * np.exp(features_b),
)
)
@staticmethod
def compute_energy(features: np.ndarray) -> float:
return float(np.sum(np.exp(features)))
def get_fbank_extractor() -> BigVGANFbank:
return BigVGANFbank(BigVGANFbankConfig())
if __name__ == "__main__":
extractor = BigVGANFbank(BigVGANFbankConfig())
samples = torch.from_numpy(np.random.random([1000]).astype(np.float32))
samples = torch.clip(samples, -1.0, 1.0)
fbank = extractor.extract(samples, 24000.0)
print(f"fbank {fbank.shape}")
from scipy.io.wavfile import read
MAX_WAV_VALUE = 32768.0
sampling_rate, samples = read(
"egs/libritts/prompts/5639_40744_000000_000002.wav"
)
print(f"samples: [{samples.min()}, {samples.max()}]")
fbank = extractor.extract(samples.astype(np.float32) / MAX_WAV_VALUE, 24000)
print(f"fbank {fbank.shape}")
import matplotlib.pyplot as plt
_ = plt.figure(figsize=(18, 10))
plt.imshow(
X=fbank.transpose(1, 0),
cmap=plt.get_cmap("jet"),
aspect="auto",
interpolation="nearest",
)
plt.gca().invert_yaxis()
plt.savefig("egs/libritts/prompts/5639_40744_000000_000002.png")
plt.close()
print("fbank test PASS!")
| EXA-1-master | exa/models/valle/vall-e-main/valle/data/fbank.py |
from .datamodule import *
from .tokenizer import *
from .collation import *
| EXA-1-master | exa/models/valle/vall-e-main/valle/data/__init__.py |
from pathlib import Path
from typing import List, Tuple
import numpy as np
import torch
from valle.utils import SymbolTable
class TextTokenCollater:
"""Collate list of text tokens
Map sentences to integers. Sentences are padded to equal length.
Beginning and end-of-sequence symbols can be added.
Example:
>>> token_collater = TextTokenCollater(text_tokens)
>>> tokens_batch, tokens_lens = token_collater(text)
Returns:
tokens_batch: IntTensor of shape (B, L)
B: batch dimension, number of input sentences
L: length of the longest sentence
tokens_lens: IntTensor of shape (B,)
Length of each sentence after adding <eos> and <bos>
but before padding.
"""
def __init__(
self,
text_tokens: List[str],
add_eos: bool = True,
add_bos: bool = True,
pad_symbol: str = "<pad>",
bos_symbol: str = "<bos>",
eos_symbol: str = "<eos>",
):
self.pad_symbol = pad_symbol
self.add_eos = add_eos
self.add_bos = add_bos
self.bos_symbol = bos_symbol
self.eos_symbol = eos_symbol
unique_tokens = (
[pad_symbol]
+ ([bos_symbol] if add_bos else [])
+ ([eos_symbol] if add_eos else [])
+ sorted(text_tokens)
)
self.token2idx = {token: idx for idx, token in enumerate(unique_tokens)}
self.idx2token = [token for token in unique_tokens]
def index(
self, tokens_list: List[str]
) -> Tuple[torch.Tensor, torch.Tensor]:
seqs, seq_lens = [], []
for tokens in tokens_list:
            assert all(s in self.token2idx for s in tokens)
seq = (
([self.bos_symbol] if self.add_bos else [])
+ list(tokens)
+ ([self.eos_symbol] if self.add_eos else [])
)
seqs.append(seq)
seq_lens.append(len(seq))
max_len = max(seq_lens)
for k, (seq, seq_len) in enumerate(zip(seqs, seq_lens)):
seq.extend([self.pad_symbol] * (max_len - seq_len))
tokens = torch.from_numpy(
np.array(
[[self.token2idx[token] for token in seq] for seq in seqs],
dtype=np.int64,
)
)
tokens_lens = torch.IntTensor(seq_lens)
return tokens, tokens_lens
def __call__(self, texts: List[str]) -> Tuple[torch.Tensor, torch.Tensor]:
tokens_seqs = [[p for p in text] for text in texts]
max_len = len(max(tokens_seqs, key=len))
seqs = [
([self.bos_symbol] if self.add_bos else [])
+ list(seq)
+ ([self.eos_symbol] if self.add_eos else [])
+ [self.pad_symbol] * (max_len - len(seq))
for seq in tokens_seqs
]
tokens_batch = torch.from_numpy(
np.array(
[[self.token2idx[token] for token in seq] for seq in seqs],
dtype=np.int64,
)
)
tokens_lens = torch.IntTensor(
[
len(seq) + int(self.add_eos) + int(self.add_bos)
for seq in tokens_seqs
]
)
return tokens_batch, tokens_lens
def get_text_token_collater(text_tokens_file: str) -> TextTokenCollater:
text_tokens_path = Path(text_tokens_file)
unique_tokens = SymbolTable.from_file(text_tokens_path)
collater = TextTokenCollater(
unique_tokens.symbols, add_bos=True, add_eos=True
)
return collater
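# --- Hedged usage sketch (not part of the original module) ------------------
# Minimal collation with an inline symbol inventory (no token file needed);
# each "sentence" is a sequence of phone-like symbols.
def _example_text_token_collater():
    collater = TextTokenCollater(["a", "b", "c"], add_bos=True, add_eos=True)
    tokens_batch, tokens_lens = collater([["a", "b"], ["c"]])
    return tokens_batch.shape, tokens_lens  # torch.Size([2, 4]), tensor([4, 3])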
| EXA-1-master | exa/models/valle/vall-e-main/valle/data/collation.py |
# Copyright 2023 (authors: Feiteng Li)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
modified from lhotse.dataset.speech_synthesis.py
"""
from typing import Callable, Dict, List, Sequence, Union
import torch
from lhotse import validate
from lhotse.cut import CutSet
from lhotse.dataset.collation import collate_audio
from lhotse.dataset.input_strategies import BatchIO, PrecomputedFeatures
from lhotse.utils import ifnone
from valle.data.collation import TextTokenCollater
class SpeechSynthesisDataset(torch.utils.data.Dataset):
"""
    The PyTorch Dataset for the speech synthesis (e.g. TTS) task.
Each item in this dataset is a dict of:
.. code-block::
{
'audio': (B x NumSamples) float tensor
'audio_lens': (B, ) int tensor
'text': str
'audio_features': (B x NumFrames x NumFeatures) float tensor
'audio_features_lens': (B, ) int tensor
'text_tokens': (B x NumTextTokens) long tensor
'text_tokens_lens': (B, ) int tensor
}
"""
def __init__(
self,
text_token_collater: TextTokenCollater,
cut_transforms: List[Callable[[CutSet], CutSet]] = None,
feature_input_strategy: BatchIO = PrecomputedFeatures(),
feature_transforms: Union[Sequence[Callable], Callable] = None,
) -> None:
super().__init__()
self.text_token_collater = text_token_collater
self.cut_transforms = ifnone(cut_transforms, [])
self.feature_input_strategy = feature_input_strategy
if feature_transforms is None:
feature_transforms = []
elif not isinstance(feature_transforms, Sequence):
feature_transforms = [feature_transforms]
assert all(
isinstance(transform, Callable) for transform in feature_transforms
), "Feature transforms must be Callable"
self.feature_transforms = feature_transforms
def __getitem__(self, cuts: CutSet) -> Dict[str, torch.Tensor]:
validate_for_tts(cuts)
for transform in self.cut_transforms:
cuts = transform(cuts)
if False: # not used
audio, audio_lens = collate_audio(cuts)
else: # for sharing tokenized features in different machines
audio, audio_lens = None, None
audio_features, audio_features_lens = self.feature_input_strategy(cuts)
for transform in self.feature_transforms:
audio_features = transform(audio_features)
text_tokens, text_tokens_lens = self.text_token_collater(
[cut.supervisions[0].custom["tokens"]["text"] for cut in cuts]
)
return {
"utt_id": [cut.id for cut in cuts],
"text": [cut.supervisions[0].text for cut in cuts],
"audio": audio,
"audio_lens": audio_lens,
"audio_features": audio_features,
"audio_features_lens": audio_features_lens,
"text_tokens": text_tokens,
"text_tokens_lens": text_tokens_lens,
}
def validate_for_tts(cuts: CutSet) -> None:
validate(cuts)
for cut in cuts:
assert (
len(cut.supervisions) == 1
), "Only the Cuts with single supervision are supported."
| EXA-1-master | exa/models/valle/vall-e-main/valle/data/dataset.py |
#!/usr/bin/env python3
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Pattern, Union
import numpy as np
import torch
import torchaudio
from encodec import EncodecModel
from encodec.utils import convert_audio
from lhotse.features import FeatureExtractor
from lhotse.utils import Seconds, compute_num_frames
from phonemizer.backend import EspeakBackend
from phonemizer.backend.espeak.language_switch import LanguageSwitch
from phonemizer.backend.espeak.words_mismatch import WordMismatch
from phonemizer.punctuation import Punctuation
from phonemizer.separator import Separator
try:
from pypinyin import Style, pinyin
from pypinyin.style._utils import get_finals, get_initials
except Exception:
pass
class PypinyinBackend:
"""PypinyinBackend for Chinese. Most codes is referenced from espnet.
There are two types pinyin or initials_finals, one is
just like "ni1 hao3", the other is like "n i1 h ao3".
"""
def __init__(
self,
backend="initials_finals",
punctuation_marks: Union[str, Pattern] = Punctuation.default_marks(),
) -> None:
self.backend = backend
self.punctuation_marks = punctuation_marks
def phonemize(
self, text: List[str], separator: Separator, strip=True, njobs=1
) -> List[str]:
assert isinstance(text, List)
phonemized = []
for _text in text:
_text = re.sub(" +", " ", _text.strip())
_text = _text.replace(" ", separator.word)
phones = []
if self.backend == "pypinyin":
for n, py in enumerate(
pinyin(
_text, style=Style.TONE3, neutral_tone_with_five=True
)
):
if all([c in self.punctuation_marks for c in py[0]]):
if len(phones):
assert phones[-1] == separator.syllable
phones.pop(-1)
phones.extend(list(py[0]))
else:
phones.extend([py[0], separator.syllable])
elif self.backend == "pypinyin_initials_finals":
for n, py in enumerate(
pinyin(
_text, style=Style.TONE3, neutral_tone_with_five=True
)
):
if all([c in self.punctuation_marks for c in py[0]]):
if len(phones):
assert phones[-1] == separator.syllable
phones.pop(-1)
phones.extend(list(py[0]))
else:
if py[0][-1].isalnum():
initial = get_initials(py[0], strict=False)
if py[0][-1].isdigit():
final = (
get_finals(py[0][:-1], strict=False)
+ py[0][-1]
)
else:
final = get_finals(py[0], strict=False)
phones.extend(
[
initial,
separator.phone,
final,
separator.syllable,
]
)
else:
                            raise ValueError(f"Unexpected pinyin token: {py[0]}")
else:
raise NotImplementedError
phonemized.append(
"".join(phones).rstrip(f"{separator.word}{separator.syllable}")
)
return phonemized
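# --- Hedged usage sketch (not part of the original module) ------------------
# The two pypinyin backends differ only in granularity (see the docstring):
# "pypinyin" keeps whole syllables, while "pypinyin_initials_finals" splits
# each syllable into initial and final.  Requires the optional pypinyin
# dependency imported above; illustrative only.
def _example_pypinyin_backend():
    separator = Separator(word="_", syllable="-", phone="|")
    backend = PypinyinBackend(
        backend="pypinyin_initials_finals",
        punctuation_marks=Punctuation.default_marks() + separator.word,
    )
    return backend.phonemize(["你好"], separator=separator)  # e.g. ["n|i3-h|ao3"]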
class TextTokenizer:
"""Phonemize Text."""
def __init__(
self,
language="en-us",
backend="espeak",
separator=Separator(word="_", syllable="-", phone="|"),
preserve_punctuation=True,
punctuation_marks: Union[str, Pattern] = Punctuation.default_marks(),
with_stress: bool = False,
tie: Union[bool, str] = False,
language_switch: LanguageSwitch = "keep-flags",
words_mismatch: WordMismatch = "ignore",
) -> None:
if backend == "espeak":
phonemizer = EspeakBackend(
language,
punctuation_marks=punctuation_marks,
preserve_punctuation=preserve_punctuation,
with_stress=with_stress,
tie=tie,
language_switch=language_switch,
words_mismatch=words_mismatch,
)
elif backend in ["pypinyin", "pypinyin_initials_finals"]:
phonemizer = PypinyinBackend(
backend=backend,
punctuation_marks=punctuation_marks + separator.word,
)
else:
raise NotImplementedError(f"{backend}")
self.backend = phonemizer
self.separator = separator
def to_list(self, phonemized: str) -> List[str]:
fields = []
for word in phonemized.split(self.separator.word):
# "ɐ m|iː|n?" ɹ|ɪ|z|ɜː|v; h|ɪ|z.
pp = re.findall(r"\w+|[^\w\s]", word, re.UNICODE)
fields.extend(
[p for p in pp if p != self.separator.phone]
+ [self.separator.word]
)
assert len("".join(fields[:-1])) == len(phonemized) - phonemized.count(
self.separator.phone
)
return fields[:-1]
def __call__(self, text, strip=True) -> List[List[str]]:
if isinstance(text, str):
text = [text]
phonemized = self.backend.phonemize(
text, separator=self.separator, strip=strip, njobs=1
)
return [self.to_list(p) for p in phonemized]
def tokenize_text(tokenizer: TextTokenizer, text: str) -> List[str]:
phonemes = tokenizer([text.strip()])
return phonemes[0] # k2symbols
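# --- Hedged usage sketch (not part of the original module) ------------------
# Phonemizing English text with the default espeak backend (assumes the
# espeak-ng library is installed on the system); the exact phone inventory
# depends on the espeak version.
def _example_tokenize_text():
    tokenizer = TextTokenizer(language="en-us", backend="espeak")
    return tokenize_text(tokenizer, "Hello world.")  # e.g. ['h', 'ə', 'l', 'oʊ', '_', ...]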
def remove_encodec_weight_norm(model):
from encodec.modules import SConv1d
from encodec.modules.seanet import SConvTranspose1d, SEANetResnetBlock
from torch.nn.utils import remove_weight_norm
encoder = model.encoder.model
for key in encoder._modules:
if isinstance(encoder._modules[key], SEANetResnetBlock):
remove_weight_norm(encoder._modules[key].shortcut.conv.conv)
block_modules = encoder._modules[key].block._modules
for skey in block_modules:
if isinstance(block_modules[skey], SConv1d):
remove_weight_norm(block_modules[skey].conv.conv)
elif isinstance(encoder._modules[key], SConv1d):
remove_weight_norm(encoder._modules[key].conv.conv)
decoder = model.decoder.model
for key in decoder._modules:
if isinstance(decoder._modules[key], SEANetResnetBlock):
remove_weight_norm(decoder._modules[key].shortcut.conv.conv)
block_modules = decoder._modules[key].block._modules
for skey in block_modules:
if isinstance(block_modules[skey], SConv1d):
remove_weight_norm(block_modules[skey].conv.conv)
elif isinstance(decoder._modules[key], SConvTranspose1d):
remove_weight_norm(decoder._modules[key].convtr.convtr)
elif isinstance(decoder._modules[key], SConv1d):
remove_weight_norm(decoder._modules[key].conv.conv)
class AudioTokenizer:
"""EnCodec audio."""
def __init__(
self,
device: Any = None,
) -> None:
# Instantiate a pretrained EnCodec model
model = EncodecModel.encodec_model_24khz()
model.set_target_bandwidth(6.0)
remove_encodec_weight_norm(model)
if not device:
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda:0")
self._device = device
self.codec = model.to(device)
self.sample_rate = model.sample_rate
self.channels = model.channels
@property
def device(self):
return self._device
def encode(self, wav: torch.Tensor) -> torch.Tensor:
return self.codec.encode(wav.to(self.device))
def decode(self, frames: torch.Tensor) -> torch.Tensor:
return self.codec.decode(frames)
def tokenize_audio(tokenizer: AudioTokenizer, audio_path: str):
# Load and pre-process the audio waveform
wav, sr = torchaudio.load(audio_path)
wav = convert_audio(wav, sr, tokenizer.sample_rate, tokenizer.channels)
wav = wav.unsqueeze(0)
# Extract discrete codes from EnCodec
with torch.no_grad():
encoded_frames = tokenizer.encode(wav)
return encoded_frames
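# A hedged round-trip sketch: "prompt.wav" is a hypothetical path. encode()
# returns EnCodec frames as a list of (codes, scale) tuples; at the 6 kbps
# target bandwidth the codes tensor has shape [1, 8, T].
def _example_tokenize_audio():
    tokenizer = AudioTokenizer()
    encoded_frames = tokenize_audio(tokenizer, "prompt.wav")
    codes = encoded_frames[0][0]  # [B, n_q, T] == [1, 8, T]
    wav = tokenizer.decode(encoded_frames)  # reconstructed waveform
    return codes, wav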
@dataclass
class AudioTokenConfig:
frame_shift: Seconds = 320.0 / 24000
num_quantizers: int = 8
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
@staticmethod
def from_dict(data: Dict[str, Any]) -> "AudioTokenConfig":
return AudioTokenConfig(**data)
class AudioTokenExtractor(FeatureExtractor):
name = "encodec"
config_type = AudioTokenConfig
def __init__(self, config: Optional[Any] = None):
super(AudioTokenExtractor, self).__init__(config)
self.tokenizer = AudioTokenizer()
def extract(
self, samples: Union[np.ndarray, torch.Tensor], sampling_rate: int
) -> np.ndarray:
if not isinstance(samples, torch.Tensor):
samples = torch.from_numpy(samples)
if sampling_rate != self.tokenizer.sample_rate:
samples = convert_audio(
samples,
sampling_rate,
self.tokenizer.sample_rate,
self.tokenizer.channels,
)
if len(samples.shape) == 2:
samples = samples.unsqueeze(0)
else:
            raise ValueError(
                f"Expected 2-D samples (channels, time), got shape {tuple(samples.shape)}"
            )
device = self.tokenizer.device
encoded_frames = self.tokenizer.encode(samples.detach().to(device))
codes = encoded_frames[0][0] # [B, n_q, T]
        # Trim (or sanity-check) the code length against the expected frame count.
        duration = round(samples.shape[-1] / sampling_rate, ndigits=12)
        expected_num_frames = compute_num_frames(
            duration=duration,
            frame_shift=self.frame_shift,
            sampling_rate=sampling_rate,
        )
        assert abs(codes.shape[-1] - expected_num_frames) <= 1
        codes = codes[..., :expected_num_frames]
return codes.cpu().squeeze(0).permute(1, 0).numpy()
@property
def frame_shift(self) -> Seconds:
return self.config.frame_shift
def feature_dim(self, sampling_rate: int) -> int:
return self.config.num_quantizers
def pad_tensor_list(self, tensor_list, device, padding_value=0):
        # Compute the length of each tensor
        lengths = [tensor.shape[0] for tensor in tensor_list]
        # Pad to a common length with pad_sequence
tensor_list = [torch.Tensor(t).to(device) for t in tensor_list]
padded_tensor = torch.nn.utils.rnn.pad_sequence(
tensor_list, batch_first=True, padding_value=padding_value
)
return padded_tensor, lengths
def extract_batch(self, samples, sampling_rate, lengths) -> np.ndarray:
samples = [wav.squeeze() for wav in samples]
device = self.tokenizer.device
samples, lengths = self.pad_tensor_list(samples, device)
samples = samples.unsqueeze(1)
if not isinstance(samples, torch.Tensor):
samples = torch.from_numpy(samples)
if len(samples.shape) != 3:
            raise ValueError(
                f"Expected 3-D padded samples (batch, channel, time), got shape {tuple(samples.shape)}"
            )
        if sampling_rate != self.tokenizer.sample_rate:
            # Resample each padded waveform, then stack back into a single
            # (B, C, T) tensor so the batched encode below receives a tensor.
            samples = torch.stack(
                [
                    convert_audio(
                        wav,
                        sampling_rate,
                        self.tokenizer.sample_rate,
                        self.tokenizer.channels,
                    )
                    for wav in samples
                ]
            )
# Extract discrete codes from EnCodec
with torch.no_grad():
encoded_frames = self.tokenizer.encode(samples.detach().to(device))
encoded_frames = encoded_frames[0][0] # [B, n_q, T]
batch_codes = []
for b, length in enumerate(lengths):
codes = encoded_frames[b]
duration = round(length / sampling_rate, ndigits=12)
expected_num_frames = compute_num_frames(
duration=duration,
frame_shift=self.frame_shift,
sampling_rate=sampling_rate,
)
batch_codes.append(codes[..., :expected_num_frames])
return [codes.cpu().permute(1, 0).numpy() for codes in batch_codes]
if __name__ == "__main__":
model = EncodecModel.encodec_model_24khz()
model.set_target_bandwidth(6.0)
samples = torch.from_numpy(np.random.random([4, 1, 1600])).type(
torch.float32
)
codes_raw = model.encode(samples)
remove_encodec_weight_norm(model)
codes_norm = model.encode(samples)
assert torch.allclose(codes_raw[0][0], codes_norm[0][0])
| EXA-1-master | exa/models/valle/vall-e-main/valle/data/tokenizer.py |
import random
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from typing import Tuple, Type
from lhotse import CutSet
from lhotse.dataset.collation import collate_features
from lhotse.dataset.input_strategies import (
ExecutorType,
PrecomputedFeatures,
_get_executor,
)
from lhotse.utils import fastcopy
class PromptedFeatures:
def __init__(self, prompts, features):
self.prompts = prompts
self.features = features
def to(self, device):
return PromptedFeatures(
self.prompts.to(device), self.features.to(device)
)
def sum(self):
return self.features.sum()
@property
def ndim(self):
return self.features.ndim
@property
def data(self):
return (self.prompts, self.features)
class PromptedPrecomputedFeatures(PrecomputedFeatures):
"""
:class:`InputStrategy` that reads pre-computed features, whose manifests
are attached to cuts, from disk.
    In addition to the target features, it returns prompt features drawn from a
    neighbouring utterance (a preceding or following cut of the same speaker/chapter).
.. automethod:: __call__
"""
def __init__(
self,
dataset: str,
cuts: CutSet,
num_workers: int = 0,
executor_type: Type[ExecutorType] = ThreadPoolExecutor,
) -> None:
super(PromptedPrecomputedFeatures, self).__init__(
num_workers, executor_type
)
self.utt2neighbors = defaultdict(lambda: [])
if dataset.lower() == "libritts":
# 909_131041_000013_000002
# 909_131041_000013_000003
speaker2utts = defaultdict(lambda: [])
utt2cut = {}
for cut in cuts:
speaker = cut.supervisions[0].speaker
speaker2utts[speaker].append(cut.id)
utt2cut[cut.id] = cut
for spk in speaker2utts:
uttids = sorted(speaker2utts[spk])
                # Use the ordering of the sorted keys to find the neighbouring utterances.
                # The keys have the structure speaker_book_x_y, e.g. 1089_134691_000004_000001.
if len(uttids) == 1:
self.utt2neighbors[uttids[0]].append(utt2cut[uttids[0]])
continue
utt2prevutt = dict(zip(uttids, [uttids[1]] + uttids[:-1]))
utt2postutt = dict(zip(uttids[:-1], uttids[1:]))
for utt in utt2prevutt:
self.utt2neighbors[utt].append(utt2cut[utt2prevutt[utt]])
for utt in utt2postutt:
self.utt2neighbors[utt].append(utt2cut[utt2postutt[utt]])
elif dataset.lower() == "ljspeech":
utt2cut = {}
uttids = []
for cut in cuts:
uttids.append(cut.id)
utt2cut[cut.id] = cut
if len(uttids) == 1:
self.utt2neighbors[uttids[0]].append(utt2cut[uttids[0]])
else:
                # Use the ordering of the sorted keys to find the neighbouring utterances.
                # The keys have the structure LJ001-0010.
utt2prevutt = dict(zip(uttids, [uttids[1]] + uttids[:-1]))
utt2postutt = dict(zip(uttids[:-1], uttids[1:]))
for utt in utt2postutt:
postutt = utt2postutt[utt]
if utt[:5] == postutt[:5]:
self.utt2neighbors[utt].append(utt2cut[postutt])
for utt in utt2prevutt:
prevutt = utt2prevutt[utt]
if utt[:5] == prevutt[:5] or not self.utt2neighbors[utt]:
self.utt2neighbors[utt].append(utt2cut[prevutt])
else:
raise ValueError
def __call__(
self, cuts: CutSet
) -> Tuple[PromptedFeatures, PromptedFeatures]:
"""
Reads the pre-computed features from disk/other storage.
        The returned shape is ``(B, T, F) => (batch_size, num_frames, num_features)``.
:return: a tensor with collated features, and a tensor of ``num_frames`` of each cut before padding.
"""
features, features_lens = collate_features(
cuts,
executor=_get_executor(
self.num_workers, executor_type=self._executor_type
),
)
prompts_cuts = []
for k, cut in enumerate(cuts):
prompts_cut = random.choice(self.utt2neighbors[cut.id])
prompts_cuts.append(fastcopy(prompts_cut, id=f"{cut.id}-{str(k)}"))
mini_duration = min([cut.duration for cut in prompts_cuts] + [3.0])
# prompts_cuts = CutSet.from_cuts(prompts_cuts).truncate(
# max_duration=mini_duration,
# offset_type="random",
# preserve_id=True,
# )
prompts_cuts = CutSet(
cuts={k: cut for k, cut in enumerate(prompts_cuts)}
).truncate(
max_duration=mini_duration,
offset_type="random",
preserve_id=False,
)
prompts, prompts_lens = collate_features(
prompts_cuts,
executor=_get_executor(
self.num_workers, executor_type=self._executor_type
),
)
return PromptedFeatures(prompts, features), PromptedFeatures(
prompts_lens, features_lens
)
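# A rough illustration of how this strategy is meant to be used (the CutSet is
# assumed to come from a LibriTTS manifest with precomputed features): each
# batch then carries both the target features and prompt features drawn from a
# neighbouring utterance of the same speaker.
def _example_prompted_features(cuts: CutSet):
    strategy = PromptedPrecomputedFeatures("libritts", cuts)
    (prompts, features), (prompts_lens, features_lens) = [
        pf.data for pf in strategy(cuts)
    ]
    return prompts.shape, features.shape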
| EXA-1-master | exa/models/valle/vall-e-main/valle/data/input_strategies.py |
# Copyright 2023 (authors: Feiteng Li)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, Optional
import torch
from icefall.utils import str2bool
from lhotse import CutSet, load_manifest_lazy
from lhotse.dataset import (
CutConcatenate,
DynamicBucketingSampler,
PrecomputedFeatures,
SingleCutSampler,
SpecAugment,
)
from lhotse.dataset.input_strategies import OnTheFlyFeatures
from lhotse.utils import fix_random_seed
from torch.utils.data import DataLoader
from valle.data.collation import get_text_token_collater
from valle.data.dataset import SpeechSynthesisDataset
from valle.data.fbank import get_fbank_extractor
from valle.data.input_strategies import PromptedPrecomputedFeatures
PrecomputedFeatures = PrecomputedFeatures
class _SeedWorkers:
def __init__(self, seed: int):
self.seed = seed
def __call__(self, worker_id: int):
fix_random_seed(self.seed + worker_id)
def _get_input_strategy(input_strategy, dataset, cuts):
if input_strategy == "PromptedPrecomputedFeatures":
return PromptedPrecomputedFeatures(dataset, cuts)
return eval(input_strategy)()
class TtsDataModule:
"""
DataModule for VALL-E TTS experiments.
It assumes there is always one train and valid dataloader.
It contains all the common data pipeline modules used in TTS
experiments, e.g.:
- dynamic batch size,
- bucketing samplers,
    - cut concatenation [not used & tested yet],
    - augmentation [not used & tested yet],
    - on-the-fly feature extraction [not used & tested yet].
This class should be derived for specific corpora used in TTS tasks.
"""
def __init__(self, args: argparse.Namespace):
self.args = args
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser):
group = parser.add_argument_group(
title="TTS data related options",
description="These options are used for the preparation of "
"PyTorch DataLoaders from Lhotse CutSet's -- they control the "
"effective batch sizes, sampling strategies, applied data "
"augmentations, etc.",
)
group.add_argument(
"--manifest-dir",
type=Path,
default=Path("data/tokenized"),
help="Path to directory with train/valid/test cuts.",
)
group.add_argument(
"--max-duration",
type=int,
default=40.0,
help="Maximum pooled recordings duration (seconds) in a "
"single batch. You can reduce it if it causes CUDA OOM.",
)
group.add_argument(
"--bucketing-sampler",
type=str2bool,
default=True,
help="When enabled, the batches will come from buckets of "
"similar duration (saves padding frames).",
)
group.add_argument(
"--num-buckets",
type=int,
default=10,
help="The number of buckets for the DynamicBucketingSampler"
"(you might want to increase it for larger datasets).",
)
group.add_argument(
"--concatenate-cuts",
type=str2bool,
default=False,
help="When enabled, utterances (cuts) will be concatenated "
"to minimize the amount of padding.",
)
group.add_argument(
"--duration-factor",
type=float,
default=1.0,
help="Determines the maximum duration of a concatenated cut "
"relative to the duration of the longest cut in a batch.",
)
group.add_argument(
"--gap",
type=float,
default=0.1,
help="The amount of padding (in seconds) inserted between "
"concatenated cuts. This padding is filled with noise when "
"noise augmentation is used.",
)
group.add_argument(
"--on-the-fly-feats",
type=str2bool,
default=False,
help="When enabled, use on-the-fly cut mixing and feature "
"extraction. Will drop existing precomputed feature manifests "
"if available.",
)
group.add_argument(
"--shuffle",
type=str2bool,
default=True,
help="When enabled (=default), the examples will be "
"shuffled for each epoch.",
)
group.add_argument(
"--drop-last",
type=str2bool,
default=False,
help="Whether to drop last batch. Used by sampler.",
)
group.add_argument(
"--return-cuts",
type=str2bool,
default=True,
help="When enabled, each batch will have the "
"field: batch['supervisions']['cut'] with the cuts that "
"were used to construct it.",
)
group.add_argument(
"--num-workers",
type=int,
default=8,
help="The number of training dataloader workers that "
"collect the batches.",
)
group.add_argument(
"--enable-spec-aug",
type=str2bool,
default=False,
help="When enabled, use SpecAugment for training dataset.",
)
group.add_argument(
"--spec-aug-time-warp-factor",
type=int,
default=80,
help="Used only when --enable-spec-aug is True. "
"It specifies the factor for time warping in SpecAugment. "
"Larger values mean more warping. "
"A value less than 1 means to disable time warp.",
)
group.add_argument(
"--input-strategy",
type=str,
default="PrecomputedFeatures",
help="AudioSamples or PrecomputedFeatures or PromptedPrecomputedFeatures",
)
group.add_argument(
"--dataset",
type=str,
default="libritts",
help="--input-strategy PromptedPrecomputedFeatures needs dataset name to prepare prompts.",
)
parser.add_argument(
"--text-tokens",
type=str,
default="data/tokenized/unique_text_tokens.k2symbols",
help="Path to the unique text tokens file",
)
parser.add_argument(
"--sampling-rate",
type=int,
default=24000,
help="""Audio sampling rate.""",
)
def train_dataloaders(
self,
cuts_train: CutSet,
sampler_state_dict: Optional[Dict[str, Any]] = None,
) -> DataLoader:
"""
Args:
cuts_train:
CutSet for training.
sampler_state_dict:
The state dict for the training sampler.
"""
transforms = []
if self.args.concatenate_cuts:
logging.info(
f"Using cut concatenation with duration factor "
f"{self.args.duration_factor} and gap {self.args.gap}."
)
# Cut concatenation should be the first transform in the list,
# so that if we e.g. mix noise in, it will fill the gaps between
# different utterances.
transforms = [
CutConcatenate(
duration_factor=self.args.duration_factor, gap=self.args.gap
)
] + transforms
input_transforms = []
if self.args.enable_spec_aug:
logging.info("Enable SpecAugment")
logging.info(
f"Time warp factor: {self.args.spec_aug_time_warp_factor}"
)
# Set the value of num_frame_masks according to Lhotse's version.
# In different Lhotse's versions, the default of num_frame_masks is
# different.
num_frame_masks = 10
num_frame_masks_parameter = inspect.signature(
SpecAugment.__init__
).parameters["num_frame_masks"]
if num_frame_masks_parameter.default == 1:
num_frame_masks = 2
logging.info(f"Num frame mask: {num_frame_masks}")
input_transforms.append(
SpecAugment(
time_warp_factor=self.args.spec_aug_time_warp_factor,
num_frame_masks=num_frame_masks,
features_mask_size=27,
num_feature_masks=2,
frames_mask_size=100,
)
)
else:
logging.info("Disable SpecAugment")
logging.info("About to create train dataset")
if self.args.on_the_fly_feats:
# NOTE: the PerturbSpeed transform should be added only if we
# remove it from data prep stage.
# Add on-the-fly speed perturbation; since originally it would
# have increased epoch size by 3, we will apply prob 2/3 and use
# 3x more epochs.
# Speed perturbation probably should come first before
# concatenation, but in principle the transforms order doesn't have
# to be strict (e.g. could be randomized)
# transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa
# Drop feats to be on the safe side.
train = SpeechSynthesisDataset(
get_text_token_collater(self.args.text_tokens),
cut_transforms=transforms,
feature_input_strategy=OnTheFlyFeatures(get_fbank_extractor()),
feature_transforms=input_transforms,
)
else:
train = SpeechSynthesisDataset(
get_text_token_collater(self.args.text_tokens),
feature_input_strategy=_get_input_strategy(
self.args.input_strategy, self.args.dataset, cuts_train
),
cut_transforms=transforms,
feature_transforms=input_transforms,
)
if self.args.bucketing_sampler:
logging.info("Using DynamicBucketingSampler")
train_sampler = DynamicBucketingSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
num_buckets=self.args.num_buckets,
drop_last=self.args.drop_last,
)
else:
logging.info(
"Using SingleCutSampler and sort by duraton(ascending=True)."
)
cuts_train = cuts_train.to_eager().sort_by_duration(ascending=True)
train_sampler = SingleCutSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
)
logging.info("About to create train dataloader")
if sampler_state_dict is not None:
logging.info("Loading sampler state dict")
train_sampler.load_state_dict(sampler_state_dict)
# 'seed' is derived from the current random state, which will have
# previously been set in the main process.
seed = torch.randint(0, 100000, ()).item()
worker_init_fn = _SeedWorkers(seed)
train_dl = DataLoader(
train,
sampler=train_sampler,
batch_size=None,
num_workers=self.args.num_workers,
persistent_workers=False,
worker_init_fn=worker_init_fn,
)
return train_dl
def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader:
logging.info("About to create dev dataset")
if self.args.on_the_fly_feats:
validate = SpeechSynthesisDataset(
get_text_token_collater(self.args.text_tokens),
feature_input_strategy=OnTheFlyFeatures(get_fbank_extractor()),
cut_transforms=[],
)
else:
validate = SpeechSynthesisDataset(
get_text_token_collater(self.args.text_tokens),
feature_input_strategy=_get_input_strategy(
self.args.input_strategy, self.args.dataset, cuts_valid
),
cut_transforms=[],
)
valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
)
logging.info("About to create dev dataloader")
valid_dl = DataLoader(
validate,
sampler=valid_sampler,
batch_size=None,
num_workers=4,
persistent_workers=False,
)
return valid_dl
def test_dataloaders(self, cuts: CutSet) -> DataLoader:
logging.debug("About to create test dataset")
test = SpeechSynthesisDataset(
get_text_token_collater(self.args.text_tokens),
feature_input_strategy=OnTheFlyFeatures(get_fbank_extractor())
if self.args.on_the_fly_feats
else _get_input_strategy(
self.args.input_strategy, self.args.dataset, cuts
),
cut_transforms=[],
)
sampler = DynamicBucketingSampler(
cuts,
max_duration=self.args.max_duration,
shuffle=False,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
test,
batch_size=None,
sampler=sampler,
num_workers=self.args.num_workers,
)
return test_dl
@lru_cache()
def train_cuts(self) -> CutSet:
logging.info("About to get train cuts")
return load_manifest_lazy(
self.args.manifest_dir / "cuts_train.jsonl.gz"
)
@lru_cache()
def dev_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
return load_manifest_lazy(self.args.manifest_dir / "cuts_dev.jsonl.gz")
@lru_cache()
def test_cuts(self) -> CutSet:
logging.info("About to get test cuts")
return load_manifest_lazy(self.args.manifest_dir / "cuts_test.jsonl.gz")
| EXA-1-master | exa/models/valle/vall-e-main/valle/data/datamodule.py |
import subprocess
from pathlib import Path
from datetime import datetime
from setuptools import setup, find_packages
def shell(*args):
out = subprocess.check_output(args)
return out.decode("ascii").strip()
def write_version(version_core, pre_release=True):
if pre_release:
time = shell("git", "log", "-1", "--format=%cd", "--date=iso")
time = datetime.strptime(time, "%Y-%m-%d %H:%M:%S %z")
time = time.strftime("%Y%m%d%H%M%S")
version = f"{version_core}-dev{time}"
else:
version = version_core
with open(Path("vall_e", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="vall-e",
python_requires=">=3.10.0",
version=write_version("0.0.1"),
description="An unofficial toy implementation of the audio LM VALL-E",
author="enhuiz",
author_email="[email protected]",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=[
"coloredlogs>=15.0.1",
"deepspeed>=0.7.7",
"diskcache>=5.4.0",
"einops>=0.6.0",
"encodec>=0.1.1",
"g2p_en>=2.1.0",
"humanize>=4.4.0",
"matplotlib>=3.6.0",
"numpy>=1.23.3",
"omegaconf>=2.2.3",
"openTSNE>=0.6.2",
"pandas>=1.5.0",
"soundfile>=0.11.0",
"torch>=1.13.0",
"torchaudio>=0.13.0",
"tqdm>=4.64.1",
],
url="https://github.com/enhuiz/vall-e",
)
| EXA-1-master | exa/models/valle/vall-e-main 2/setup.py |
#!/usr/bin/env python3
import argparse
import json
import re
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
def plot(paths, args):
dfs = []
for path in paths:
with open(path, "r") as f:
text = f.read()
rows = []
pattern = r"(\{.+?\})"
for row in re.findall(pattern, text, re.DOTALL):
try:
row = json.loads(row)
except Exception as e:
continue
if "global_step" in row:
rows.append(row)
df = pd.DataFrame(rows)
if "name" in df:
df["name"] = df["name"].fillna("train")
else:
df["name"] = "train"
df["group"] = str(path.parents[args.group_level])
df["group"] = df["group"] + "/" + df["name"]
dfs.append(df)
df = pd.concat(dfs)
    if args.max_x is not None:
df = df[df["global_step"] < args.max_x]
for gtag, gdf in sorted(
df.groupby("group"),
key=lambda p: (p[0].split("/")[-1], p[0]),
):
for y in args.ys:
gdf = gdf.sort_values("global_step")
if gdf[y].isna().all():
continue
if args.max_y is not None:
gdf = gdf[gdf[y] < args.max_y]
gdf[y] = gdf[y].ewm(10).mean()
gdf.plot(
x="global_step",
y=y,
label=f"{gtag}/{y}",
ax=plt.gca(),
marker="x" if len(gdf) < 100 else None,
alpha=0.7,
)
plt.gca().legend(
loc="center left",
fancybox=True,
shadow=True,
bbox_to_anchor=(1.04, 0.5),
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ys", nargs="+")
parser.add_argument("--log-dir", default="logs", type=Path)
parser.add_argument("--out-dir", default="logs", type=Path)
parser.add_argument("--filename", default="log.txt")
parser.add_argument("--max-x", type=float, default=float("inf"))
parser.add_argument("--max-y", type=float, default=float("inf"))
parser.add_argument("--group-level", default=1)
parser.add_argument("--filter", default=None)
args = parser.parse_args()
paths = args.log_dir.rglob(f"**/{args.filename}")
if args.filter:
paths = filter(lambda p: re.match(".*" + args.filter + ".*", str(p)), paths)
plot(paths, args)
name = "-".join(args.ys)
out_path = (args.out_dir / name).with_suffix(".png")
plt.savefig(out_path, bbox_inches="tight")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main 2/scripts/plot.py |
from dataclasses import dataclass, field
from functools import cached_property
from pathlib import Path
import diskcache
from .utils import Config as ConfigBase
@dataclass(frozen=True)
class Config(ConfigBase):
data_root: Path = Path("data")
data_dirs: list[Path] = field(default_factory=lambda: [])
@property
def sample_rate(self):
return 24_000
p_additional_prompt: float = 0.8
max_prompts: int = 3
max_num_val: int = 20
max_val_ar_steps: int = 300
token_dim: int = 256
num_tokens: int = 1024
nj: int = 8
batch_size: int = 32
eval_batch_size: int = 32
warmup_min_lr: float = 1e-6
warmup_max_lr: float = 2e-4
dis_warmup_max_lr: float = 4e-4
warmup_num_steps: int = 1_000
max_iter: int = 1_000_000
gradient_clipping: float = 100
eval_every: int = 2_000
save_ckpt_every: int = 2_000
model: str = "ar-quarter"
spkr_name_getter: str = "lambda p: p.parts[-2]"
min_phones: int = 10
max_phones: int = 50
use_fp16: bool = True
gradient_accumulation_steps: int = 1
sampling_temperature: float = 1.0
cache_dataloader: bool = False
@cached_property
def get_spkr(self):
return eval(self.spkr_name_getter)
@property
def fp16_cfg(self):
return {
"enabled": self.use_fp16,
}
@property
def ds_cfg(self):
return {
"train_micro_batch_size_per_gpu": self.batch_size,
"gradient_accumulation_steps": self.gradient_accumulation_steps,
"optimizer": {
"type": "Adam",
"lr": self.warmup_min_lr,
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"warmup_min_lr": self.warmup_min_lr,
"warmup_max_lr": self.warmup_max_lr,
"warmup_num_steps": self.warmup_num_steps,
"total_num_steps": self.max_iter,
"warmup_type": "linear",
},
},
"gradient_clipping": self.gradient_clipping,
"fp16": self.fp16_cfg,
}
@property
def cache_dir(self):
return ".cache" / self.relpath
@cached_property
def diskcache(self):
if self.cache_dataloader:
return diskcache.Cache(self.cache_dir).memoize
return lambda: lambda x: x
cfg = Config.from_cli()
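# A small sanity-check sketch: cfg.ds_cfg is the dict handed to the Engine
# wrapper during training (see vall_e/train.py); the scheduler horizon and
# fp16 flag are tied to the fields defined above.
def _example_ds_cfg():
    ds_cfg = cfg.ds_cfg
    assert ds_cfg["scheduler"]["params"]["total_num_steps"] == cfg.max_iter
    assert ds_cfg["fp16"]["enabled"] == cfg.use_fp16
    return ds_cfg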
if __name__ == "__main__":
print(cfg)
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/config.py |
EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/__init__.py |
|
import argparse
import torch
from .data import VALLEDatset, create_train_val_dataloader
from .train import load_engines
def main():
parser = argparse.ArgumentParser("Save trained model to path.")
parser.add_argument("path")
args = parser.parse_args()
engine = load_engines()
model = engine["model"].module.cpu()
train_dl, *_ = create_train_val_dataloader()
assert isinstance(train_dl.dataset, VALLEDatset)
model.phone_symmap = train_dl.dataset.phone_symmap
model.spkr_symmap = train_dl.dataset.spkr_symmap
torch.save(model, args.path)
print(args.path, "saved.")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/export.py |
import json
import logging
from collections import defaultdict
import torch
from tqdm import tqdm
from .config import cfg
from .data import create_train_val_dataloader
from .emb import qnt
from .utils import setup_logging, to_device, trainer
from .vall_e import get_model
_logger = logging.getLogger(__name__)
def load_engines():
model = get_model(cfg.model)
engines = dict(
model=trainer.Engine(
model=model,
config=cfg.ds_cfg,
),
)
return trainer.load_engines(engines, cfg)
def main():
setup_logging(cfg.log_dir)
train_dl, subtrain_dl, val_dl = create_train_val_dataloader()
def train_feeder(engines, batch, name):
model = engines["model"]
if cfg.model.startswith("ar"):
_ = model(
text_list=batch["text"],
proms_list=batch["proms"],
resp_list=batch["resp"],
)
elif cfg.model.startswith("nar"):
_ = model(
text_list=batch["text"],
proms_list=batch["proms"],
resps_list=batch["resps"],
)
else:
raise NotImplementedError(cfg.model)
losses = model.gather_attribute("loss")
loss = torch.stack([*losses.values()]).sum()
stats = {}
stats |= {k: v.item() for k, v in losses.items()}
stats |= engines.gather_attribute("scalar")
return loss, stats
@torch.inference_mode()
def run_eval(engines, name, dl):
log_dir = cfg.log_dir / str(engines.global_step) / name
model = engines["model"]
log_dir = cfg.log_dir / str(engines.global_step) / name
stats = defaultdict(list)
for batch in tqdm(dl):
batch: dict = to_device(batch, cfg.device)
if cfg.model.startswith("ar"):
resp_list = model(
text_list=batch["text"],
proms_list=batch["proms"],
max_steps=cfg.max_val_ar_steps,
sampling_temperature=cfg.sampling_temperature,
)
resps_list = [r.unsqueeze(-1) for r in resp_list]
elif cfg.model.startswith("nar"):
resps_list = model(
text_list=batch["text"],
proms_list=batch["proms"],
resps_list=[r.unsqueeze(-1) for r in batch["resp"]],
sampling_temperature=cfg.sampling_temperature,
)
else:
raise NotImplementedError(cfg.model)
losses = model.gather_attribute("loss")
batch_stats = {k: v.item() for k, v in losses.items()}
for k, v in batch_stats.items():
stats[k].append(v)
for path, ref, hyp in zip(batch["path"], batch["resps"], resps_list):
relpath = path.relative_to(cfg.data_root)
hyp_path = (log_dir / "hyp" / relpath).with_suffix(".wav")
ref_path = (log_dir / "ref" / relpath).with_suffix(".wav")
hyp_path.parent.mkdir(parents=True, exist_ok=True)
ref_path.parent.mkdir(parents=True, exist_ok=True)
qnt.decode_to_file(ref, ref_path)
if len(hyp) > 0:
qnt.decode_to_file(hyp, hyp_path)
qnt.unload_model()
stats = {k: sum(v) / len(v) for k, v in stats.items()}
stats["global_step"] = engines.global_step
stats["name"] = name
_logger.info(f"Eval: {stats}.")
_logger.info(f"{json.dumps(stats)}.")
def eval_fn(engines):
run_eval(engines, "subtrain", subtrain_dl)
run_eval(engines, "val", val_dl)
trainer.train(
engines_loader=load_engines,
train_dl=train_dl,
train_feeder=train_feeder,
eval_fn=eval_fn,
)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/train.py |
"""
A sampler that balances data by key_fns.
MIT License
Copyright (c) 2023 Zhe Niu
[email protected]
"""
import random
class Sampler:
def __init__(self, l, key_fns):
self.tree = self._build(l, key_fns)
def _build(self, l, key_fns) -> dict[dict, list]:
if not key_fns:
return l
tree = {}
key_fn, *key_fns = key_fns
for x in l:
k = key_fn(x)
if k in tree:
tree[k].append(x)
else:
tree[k] = [x]
for k in tree:
tree[k] = self._build(tree[k], key_fns)
return tree
def _sample(self, tree: dict | list):
if isinstance(tree, list):
ret = random.choice(tree)
else:
key = random.choice([*tree.keys()])
ret = self._sample(tree[key])
return ret
def sample(self):
return self._sample(self.tree)
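# A small self-contained illustration: sampling is balanced over the first key
# (here a speaker id) before choosing an item, so a speaker with many files is
# not over-represented.
def _example_sampler():
    items = [("spk1", "a"), ("spk1", "b"), ("spk1", "c"), ("spk2", "d")]
    sampler = Sampler(items, key_fns=[lambda x: x[0]])
    # Each draw picks a speaker uniformly, then one of that speaker's items,
    # so "spk2" appears roughly half of the time despite having a single item.
    return [sampler.sample() for _ in range(10)]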
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/sampler.py |
import argparse
from pathlib import Path
import torch
from einops import rearrange
from .emb import g2p, qnt
from .utils import to_device
def main():
parser = argparse.ArgumentParser("VALL-E TTS")
parser.add_argument("text")
parser.add_argument("reference", type=Path)
parser.add_argument("out_path", type=Path)
parser.add_argument("--ar-ckpt", type=Path, default="zoo/ar.pt")
parser.add_argument("--nar-ckpt", type=Path, default="zoo/nar.pt")
parser.add_argument("--device", default="cuda")
args = parser.parse_args()
ar = torch.load(args.ar_ckpt).to(args.device)
nar = torch.load(args.nar_ckpt).to(args.device)
symmap = ar.phone_symmap
proms = qnt.encode_from_file(args.reference)
proms = rearrange(proms, "1 l t -> t l")
phns = torch.tensor([symmap[p] for p in g2p.encode(args.text)])
proms = to_device(proms, args.device)
phns = to_device(phns, args.device)
resp_list = ar(text_list=[phns], proms_list=[proms])
resps_list = [r.unsqueeze(-1) for r in resp_list]
resps_list = nar(text_list=[phns], proms_list=[proms], resps_list=resps_list)
qnt.decode_to_file(resps=resps_list[0], path=args.out_path)
print(args.out_path, "saved.")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/__main__.py |
import copy
import logging
import random
from collections import defaultdict
from functools import cache, cached_property
from itertools import groupby, zip_longest
from typing import Any
import numpy as np
import torch
from torch import Tensor
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from .config import cfg
from .sampler import Sampler
torch.multiprocessing.set_sharing_strategy("file_system")
_logger = logging.getLogger(__name__)
def _replace_file_extension(path, suffix):
return (path.parent / path.name.split(".")[0]).with_suffix(suffix)
def _get_quant_path(path):
return _replace_file_extension(path, ".qnt.pt")
def _load_quants(path) -> Tensor:
"""
Returns:
quants: (t q)
"""
path = _get_quant_path(path)
return torch.load(path)[0].t()
@cache
def _get_phones(path):
path = _replace_file_extension(path, ".phn.txt")
with open(path, "r", encoding="utf8") as f:
content = f.read()
return ["<s>"] + content.split() + ["</s>"]
def _interleaved_reorder(l, fn):
groups = defaultdict(list)
for e in l:
groups[fn(e)].append(e)
groups = {k: groups[k] for k in sorted(groups)}
for interleaved in zip_longest(*groups.values()):
for value in interleaved:
if value is not None:
yield value
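# A tiny illustration of the helper above: items are re-emitted round-robin
# across the groups produced by `fn`, e.g. grouping integers by parity.
def _example_interleaved_reorder():
    out = [*_interleaved_reorder([1, 2, 3, 4, 5], fn=lambda x: x % 2)]
    return out  # [2, 1, 4, 3, 5]: alternate even/odd until a group runs out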
@cache
def _validate(path, min_phones, max_phones):
phones = _get_phones(path)
unique_phones = list(set(phones))
if len(unique_phones) == 0:
return False
if len(unique_phones) == 1 and unique_phones[0] == "_":
return False
if len(phones) < min_phones:
return False
if len(phones) > max_phones:
return False
return True
class VALLEDatset(Dataset):
def __init__(
self,
paths,
phone_symmap=None,
spkr_symmap=None,
min_phones=cfg.min_phones,
max_phones=cfg.max_phones,
training=False,
extra_paths_by_spkr_name: dict[str, list] = {},
):
super().__init__()
self._head = None
self.min_phones = min_phones
self.max_phones = max_phones
self.paths = [
path for path in paths if _validate(path, self.min_phones, self.max_phones)
]
self.spkr_symmap = spkr_symmap or self._get_spkr_symmap()
self.phone_symmap = phone_symmap or self._get_phone_symmap()
self.training = training
self.paths_by_spkr_name = self._get_paths_by_spkr_name(extra_paths_by_spkr_name)
self.paths = [
p for p in self.paths if len(self.paths_by_spkr_name[cfg.get_spkr(p)]) > 1
]
if len(self.paths) == 0 and training:
raise ValueError("No valid path is found for training.")
if training:
self.sampler = Sampler(self.paths, [cfg.get_spkr])
else:
self.sampler = None
def _get_paths_by_spkr_name(self, extra_paths_by_spkr_name: dict[str, list]):
ret = defaultdict(list)
for path in self.paths:
if _get_quant_path(path).exists():
ret[cfg.get_spkr(path)].append(path)
for k, v in extra_paths_by_spkr_name.items():
ret[k].extend(v)
return {**ret}
@cached_property
def phones(self):
return sorted(set().union(*[_get_phones(path) for path in self.paths]))
def _get_phone_symmap(self):
        # Note that the phone symmap starts from 1 so that 0 can safely be used for padding.
return {s: i for i, s in enumerate(self.phones, 1)}
@cached_property
def spkrs(self):
return sorted({cfg.get_spkr(path) for path in self.paths})
def _get_spkr_symmap(self):
return {s: i for i, s in enumerate(self.spkrs)}
def sample_prompts(self, spkr_name, ignore):
prom_list = []
choices = set(self.paths_by_spkr_name[spkr_name]) - {ignore}
choices = [*choices]
if len(choices) == 0:
raise ValueError(
f"Failed to find another different utterance for {spkr_name}."
)
for _ in range(cfg.max_prompts):
path = random.choice(choices)
prom_list.append(_load_quants(path))
if random.random() > cfg.p_additional_prompt:
break
prom = torch.cat(prom_list)
return prom
def __getitem__(self, index):
if self.training:
assert self.sampler is not None
path = self.sampler.sample()
else:
path = self.paths[index]
spkr_name = cfg.get_spkr(path)
text = torch.tensor([*map(self.phone_symmap.get, _get_phones(path))])
proms = self.sample_prompts(spkr_name, ignore=path)
resps = _load_quants(path)
resp = resps[..., 0]
return dict(
path=path,
spkr_name=spkr_name,
text=text,
proms=proms,
resps=resps,
resp=resp,
)
def head_(self, n):
self._head = n
def training_(self, value):
self.training = value
def interleaved_reorder_(self, fn):
self.paths = [*_interleaved_reorder(self.paths, fn)]
def __len__(self):
return min(len(self.paths), self._head or len(self.paths))
def collate_fn(samples: list[dict]):
batch: dict[str, Any] = {k: [s[k] for s in samples] for k in samples[0]}
return batch
def _seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
def _create_dataloader(dataset, training):
return DataLoader(
dataset=dataset,
batch_size=cfg.batch_size if training else cfg.eval_batch_size,
shuffle=training,
drop_last=training,
num_workers=cfg.nj,
collate_fn=collate_fn,
persistent_workers=True,
worker_init_fn=_seed_worker,
)
def _load_train_val_paths():
paths = []
train_paths = []
val_paths = []
for data_dir in cfg.data_dirs:
paths.extend(tqdm(data_dir.rglob("*.qnt.pt")))
if len(paths) == 0:
raise RuntimeError(f"Failed to find any .qnt.pt file in {cfg.data_dirs}.")
pairs = sorted([(cfg.get_spkr(p), p) for p in paths])
del paths
for _, group in groupby(pairs, lambda pair: pair[0]):
paths = sorted([p for _, p in group])
random.seed(0)
random.shuffle(paths)
n = round(len(paths) * 0.95)
train_paths.extend(paths[:n])
val_paths.extend(paths[n:])
train_paths, val_paths = map(sorted, [train_paths, val_paths])
return train_paths, val_paths
@cfg.diskcache()
def create_datasets():
train_paths, val_paths = _load_train_val_paths()
train_dataset = VALLEDatset(
train_paths,
training=True,
)
val_dataset = VALLEDatset(
val_paths,
train_dataset.phone_symmap,
train_dataset.spkr_symmap,
extra_paths_by_spkr_name=train_dataset.paths_by_spkr_name,
)
val_dataset.interleaved_reorder_(cfg.get_spkr)
val_dataset.head_(cfg.max_num_val)
return train_dataset, val_dataset
def create_train_val_dataloader():
train_dataset, val_dataset = create_datasets()
train_dl = _create_dataloader(train_dataset, training=True)
val_dl = _create_dataloader(val_dataset, training=False)
_logger.info(str(train_dataset.phone_symmap))
_logger.info(str(train_dataset.spkr_symmap))
_logger.info(f"#samples (train): {len(train_dataset)}.")
_logger.info(f"#samples (val): {len(val_dataset)}.")
subtrain_dataset = copy.deepcopy(train_dataset)
subtrain_dataset.interleaved_reorder_(cfg.get_spkr)
subtrain_dataset.head_(cfg.max_num_val)
subtrain_dataset.training_(False)
subtrain_dl = _create_dataloader(subtrain_dataset, training=False)
assert isinstance(subtrain_dl.dataset, VALLEDatset)
return train_dl, subtrain_dl, val_dl
if __name__ == "__main__":
train_dl, subtrain_dl, val_dl = create_train_val_dataloader()
sample = train_dl.dataset[0]
print(sample)
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/data.py |
EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/emb/__init__.py |
|
import argparse
import random
from functools import cache
from pathlib import Path
import soundfile
import torch
import torchaudio
from einops import rearrange
from encodec import EncodecModel
from encodec.utils import convert_audio
from torch import Tensor
from tqdm import tqdm
from ..config import cfg
@cache
def _load_model(device="cuda"):
# Instantiate a pretrained EnCodec model
assert cfg.sample_rate == 24_000
model = EncodecModel.encodec_model_24khz()
model.set_target_bandwidth(6.0)
model.to(device)
return model
def unload_model():
return _load_model.cache_clear()
@torch.inference_mode()
def decode(codes: Tensor, device="cuda"):
"""
Args:
codes: (b q t)
"""
assert codes.dim() == 3
model = _load_model(device)
return model.decode([(codes, None)]), model.sample_rate
def decode_to_file(resps: Tensor, path: Path):
assert resps.dim() == 2, f"Require shape (t q), but got {resps.shape}."
resps = rearrange(resps, "t q -> 1 q t")
wavs, sr = decode(resps)
soundfile.write(str(path), wavs.cpu()[0, 0], sr)
def _replace_file_extension(path, suffix):
return (path.parent / path.name.split(".")[0]).with_suffix(suffix)
@torch.inference_mode()
def encode(wav: Tensor, sr: int, device="cuda"):
"""
Args:
wav: (t)
sr: int
"""
model = _load_model(device)
wav = wav.unsqueeze(0)
wav = convert_audio(wav, sr, model.sample_rate, model.channels)
wav = wav.to(device)
encoded_frames = model.encode(wav)
qnt = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1) # (b q t)
return qnt
def encode_from_file(path, device="cuda"):
wav, sr = torchaudio.load(str(path))
if wav.shape[0] == 2:
wav = wav[:1]
return encode(wav, sr, device)
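# A hedged round-trip sketch ("sample.wav" is a hypothetical path): encode a
# file to residual codes of shape (1, 8, T) at 6 kbps, then write back the
# EnCodec reconstruction next to it.
def _example_roundtrip():
    codes = encode_from_file("sample.wav")  # (1, 8, T)
    decode_to_file(rearrange(codes[0], "q t -> t q"), Path("sample.recon.wav"))
    return codes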
def main():
parser = argparse.ArgumentParser()
parser.add_argument("folder", type=Path)
parser.add_argument("--suffix", default=".wav")
args = parser.parse_args()
paths = [*args.folder.rglob(f"*{args.suffix}")]
random.shuffle(paths)
for path in tqdm(paths):
out_path = _replace_file_extension(path, ".qnt.pt")
if out_path.exists():
continue
qnt = encode_from_file(path)
torch.save(qnt.cpu(), out_path)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/emb/qnt.py |
import argparse
import random
import string
from functools import cache
from pathlib import Path
import torch
from g2p_en import G2p
from tqdm import tqdm
@cache
def _get_model():
return G2p()
@cache
def _get_graphs(path):
with open(path, "r") as f:
graphs = f.read()
return graphs
def encode(graphs: str) -> list[str]:
g2p = _get_model()
phones = g2p(graphs)
ignored = {" ", *string.punctuation}
return ["_" if p in ignored else p for p in phones]
@torch.no_grad()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("folder", type=Path)
parser.add_argument("--suffix", type=str, default=".normalized.txt")
args = parser.parse_args()
paths = list(args.folder.rglob(f"*{args.suffix}"))
random.shuffle(paths)
for path in tqdm(paths):
phone_path = path.with_name(path.stem.split(".")[0] + ".phn.txt")
if phone_path.exists():
continue
graphs = _get_graphs(path)
phones = encode(graphs)
with open(phone_path, "w") as f:
f.write(" ".join(phones))
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/emb/g2p.py |
from ..config import cfg
from .ar import AR
from .nar import NAR
def get_model(name):
name = name.lower()
if name.startswith("ar"):
Model = AR
elif name.startswith("nar"):
Model = NAR
else:
raise ValueError("Model name should start with AR or NAR.")
if "-quarter" in name:
model = Model(
cfg.num_tokens,
d_model=256,
n_heads=4,
n_layers=12,
)
elif "-half" in name:
model = Model(
cfg.num_tokens,
d_model=512,
n_heads=8,
n_layers=12,
)
else:
if name not in ["ar", "nar"]:
raise NotImplementedError(name)
model = Model(
cfg.num_tokens,
d_model=1024,
n_heads=16,
n_layers=12,
)
return model
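# A brief sketch of the naming scheme: the "ar"/"nar" prefix selects the model
# class and an optional "-quarter"/"-half" suffix shrinks the transformer.
def _example_get_model():
    ar = get_model("ar-quarter")  # d_model=256, 4 heads, 12 layers
    nar = get_model("nar")        # d_model=1024, 16 heads, 12 layers
    return ar, nar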
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/vall_e/__init__.py |
import torch
from torch import Tensor
from .base import Base
class NAR(Base):
@property
def n_resp_levels(self):
return 7
@property
def casual(self):
return False
@property
def use_stop_token(self):
return False
@property
def norm_type(self):
return "adaln"
@property
def resp_loss_only(self):
return True
def forward(
self,
text_list: list[Tensor],
proms_list: list[Tensor],
resps_list: list[Tensor],
sampling_temperature: float = 0.2,
):
"""
Args:
text_list: [t] * b
proms_list: [t' l] * b, l=8
resps_list: [t'' l] * b, l=1 or 8, 1 for testing and 8 for training.
Returns:
            [t'' l], l=8 when testing. An empty list is returned during training.
"""
n_levels_set = {r.shape[-1] for r in resps_list}
if len(n_levels_set) > 1:
raise ValueError(f"Please give only one level, got {n_levels_set}.")
n_levels = next(iter(n_levels_set))
device = text_list[0].device
if n_levels == self.n_resp_levels + 1:
assert resps_list is not None
quant_levels = torch.randint(0, self.n_resp_levels, (len(resps_list),))
prev_list = [o[..., : l + 1] for o, l in zip(resps_list, quant_levels)]
targ_list = [o[..., l + 1] for o, l in zip(resps_list, quant_levels)]
quant_levels = quant_levels.to(device=device)
_ = super().forward(
text_list,
proms_list,
prev_list,
targ_list,
return_all_resp=True,
shift_targ_list=False,
quant_levels=quant_levels,
)
# Yes, just nothing as we are training
prev_list = []
else:
prev_list = resps_list
while True:
level = prev_list[0].shape[-1] - 1
if level >= self.n_resp_levels:
break
quant_levels = torch.full((len(text_list),), level, device=device)
resp_list = super().forward(
text_list,
proms_list,
prev_list,
return_all_resp=True,
shift_targ_list=False,
quant_levels=quant_levels,
sampling_temperature=sampling_temperature,
)
prev_list = [
torch.cat([rs, r.unsqueeze(-1)], dim=-1)
for rs, r in zip(prev_list, resp_list)
]
return prev_list
def example_usage():
from functools import partial
from pathlib import Path
from einops import repeat
from ..emb.qnt import decode_to_file
from ..utils import gather_attribute
device = "cuda"
resps = torch.load("data/test/test.qnt.pt")[0].to(device)
num_qnts = 1024
model = NAR(num_qnts).to(device)
text_list = [
torch.tensor([2, 3], device=device),
]
x8 = partial(repeat, pattern="t -> t l", l=8)
proms_list = [
x8(torch.tensor([2, 3], device=device)),
]
resps_x1_list = [
resps[:1].t().to(device),
]
resps_x8_list = [
resps.t().to(device),
]
codes = model(
text_list,
proms_list,
resps_list=resps_x1_list,
sampling_temperature=0.2,
)[0]
decode_to_file(
codes,
Path("data/test/test.nar.init.wav"),
)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
for i in range(200):
optimizer.zero_grad()
_ = model(text_list, proms_list, resps_list=resps_x8_list)
losses = gather_attribute(model, "loss")
loss = sum(losses.values())
loss.backward()
optimizer.step()
if i % 20 == 0:
stats = {k: v.item() for k, v in losses.items()}
stats["loss"] = loss.item()
print(f"iter={i}, {stats}.")
for i in range(1, 8):
resps_list = [
resps[:i].t().to(device),
]
codes = model(
text_list,
proms_list,
resps_list=resps_list,
sampling_temperature=0.2,
)[0]
decode_to_file(
codes,
Path(f"data/test/test.nar.1-{i}.wav"),
)
if __name__ == "__main__":
example_usage()
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/vall_e/nar.py |
import torch
from einops import rearrange
from torch import Tensor
from tqdm import trange
from .base import Base
class AR(Base):
@property
def n_resp_levels(self):
return 1
@property
def casual(self):
return True
@property
def use_stop_token(self):
return True
@property
def norm_type(self):
return "ln"
@property
def resp_loss_only(self):
return False
def _prune(self, l: Tensor):
indices = (l == self.stop_token).nonzero()
if len(indices) == 0:
return l
return l[: indices.min().item()]
@staticmethod
def _unsqueeze_list(x_list, axis=-1):
return [x.unsqueeze(dim=axis) for x in x_list]
def forward(
self,
text_list: list[Tensor],
proms_list: list[Tensor],
resp_list: list[Tensor] | None = None,
max_steps: int = 1000,
sampling_temperature: float = 1.0,
):
if resp_list is not None:
return super().forward(
text_list,
proms_list,
self._unsqueeze_list(resp_list),
resp_list,
quant_levels=None,
shift_targ_list=True,
return_all_resp=False,
)
else:
return self._generate(
text_list,
proms_list,
max_steps,
sampling_temperature,
)
def _generate(
self,
text_list: list[Tensor],
proms_list: list[Tensor],
max_steps: int,
sampling_temperature: float,
):
device = text_list[0].device
resp_list: list[Tensor] = [
torch.zeros(0, device=device).long() for _ in text_list
]
stopped = torch.zeros(len(text_list), device=device).bool()
for _ in trange(max_steps):
r = super().forward(
text_list,
proms_list,
self._unsqueeze_list(resp_list),
sampling_temperature=sampling_temperature,
)
stopped |= r == self.stop_token
for i, ri in enumerate(r):
resp_list[i] = torch.cat([resp_list[i], ri[None]])
if stopped.all().item():
break
pruned = [self._prune(r) for r in resp_list]
return pruned
def example_usage():
from functools import partial
import soundfile
from einops import repeat
device = "cuda"
qnt = torch.load("data/test/test.qnt.pt")[0, 0].to(device)
num_qnts = 1024
model = AR(num_qnts).to(device)
text_list = [
torch.tensor([1, 2, 3], device=device),
torch.tensor([2, 3], device=device),
]
x8 = partial(repeat, pattern="t -> t l", l=8)
proms_list = [
x8(torch.tensor([1, 2, 3], device=device)),
x8(torch.tensor([2, 3], device=device)),
]
resp_list = [
torch.tensor([1, 2, 3], device=device),
qnt.to(device),
]
out = model(text_list, proms_list, max_steps=200)
print(out)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
for i in range(100):
optimizer.zero_grad()
_ = model(text_list, proms_list, resp_list)
losses = model.loss
sum(losses.values()).backward()
optimizer.step()
if i % 20 == 0:
print(f"iter={i}, {losses}.")
out = model(text_list, proms_list, max_steps=200)
print(qnt)
print(out)
from ..emb.qnt import decode
codes = rearrange(out[1], "t -> 1 1 t")
wavs, sr = decode(codes)
soundfile.write("data/test/test.ar.recon.wav", wavs.cpu()[0, 0], sr)
if __name__ == "__main__":
example_usage()
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/vall_e/ar.py |
import math
from functools import partial
from typing import Literal, overload
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import Tensor, einsum, nn
from torch.distributions import Categorical
from torch.nn.utils.rnn import pad_sequence
from torch.utils.checkpoint import checkpoint
def _create_mask(l, device):
"""1 is valid region and 0 is invalid."""
seq = torch.arange(max(l), device=device).unsqueeze(0) # (1 t)
stop = torch.tensor(l, device=device).unsqueeze(1) # (b 1)
return (seq < stop).float() # (b t)
def list_to_tensor(x_list: list[Tensor], pattern="t b c -> b t c"):
"""
Args:
x_list: [(t d)]
Returns:
x: (? ? ?)
m: (? ? ?), same as x
"""
l = list(map(len, x_list))
x = rearrange(pad_sequence(x_list), pattern)
m = _create_mask(l, x_list[0].device)
m = m.t().unsqueeze(-1) # (t b 1)
m = rearrange(m, pattern)
m = m.to(x)
return x, m
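# A quick shape check of the helper above with sequence lengths 2 and 3:
# x is zero-padded to the longest sequence and m marks the valid region.
def _example_list_to_tensor():
    a = torch.zeros(2, 4)
    b = torch.zeros(3, 4)
    x, m = list_to_tensor([a, b])  # x: (2, 3, 4), m: (2, 3, 1)
    return x.shape, m.shape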
class SinusodialEmbedding(nn.Module):
def __init__(self, d_model):
super().__init__()
self.d_model = d_model
exponent = torch.arange(self.d_half, dtype=torch.float32)
exponent = exponent / self.d_half
omega = torch.exp(-math.log(1e4) * exponent)
self.omega: torch.Tensor
self.register_buffer("omega", omega, persistent=False)
@property
def d_half(self):
assert self.d_model % 2 == 0, "Only support even d_model."
return self.d_model // 2
def forward(self, x):
"""
Args:
x: (...)
Returns:
pe: (... d)
"""
omega = self.omega
while omega.dim() <= x.dim():
omega = omega.unsqueeze(0) # (... d)
x = x.unsqueeze(-1) # (... 1)
x = omega * x
x = torch.cat([x.sin(), x.cos()], dim=-1)
return x
def get_pe(self, n: int):
"""
Args:
n: int
Returns:
pe: (n d)
"""
device = self.omega.device
return self.forward(torch.arange(n, device=device))
def add_pe(self, x):
"""
Args:
x: (b t c)
"""
e = self.get_pe(x.shape[1]) # t d
e = e[None] # b t d
x = x + e
return x
class Attention(nn.Module):
def __init__(self, d_model, n_heads, casual):
super().__init__()
assert d_model % n_heads == 0
dim_head = d_model // n_heads
self.casual = casual
self.n_heads = n_heads
self.scale = dim_head**-0.5
self.to_qkv = nn.Linear(d_model, d_model * 3, bias=False)
self.to_out = nn.Linear(d_model, d_model)
def forward(self, x, m):
"""
Args:
x: (b t c)
m: (b t c), 1 is data, 0 is padding
Returns:
x: (b t c)
"""
h = self.n_heads
q, k, v = self.to_qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, "b t (h d) -> b t h d", h=h), (q, k, v))
e = einsum("b i h d, b j h d -> b i j h", q, k)
e = e * self.scale
kpm = m.unsqueeze(1) * m.unsqueeze(2) # b i j 1
if self.casual:
kpm = kpm.squeeze(-1).tril().unsqueeze(-1) # b i j 1
e = e.masked_fill(kpm == 0, -torch.finfo(e.dtype).max)
a = e.softmax(dim=2) # Normalize on j, i.e. key
o = einsum("b i j h, b j h d -> b i h d", a, v)
o = o.flatten(-2)
o = self.to_out(o) # b t c
o = o * m
return o
class AdaLN(nn.Module):
def __init__(self, d_model, n_levels, eps=1e-5, k=0.1, c=2):
super().__init__()
self.eps = eps
self.emb = nn.Embedding(n_levels, d_model * 2)
self.k = k
self.c = c
nn.init.zeros_(self.emb.weight)
def forward(self, x, l):
logγ, β = self.emb(l).unsqueeze(1).chunk(2, dim=-1)
h = F.layer_norm(x, x.shape[-1:], eps=self.eps)
# The initial implementation (https://github.com/enhuiz/vall-e/blob/fbf023448c08e55c0422eefed7fc234cf8b76680/vall_e/vall_e/base.py#L135)
# performed worse than vanilla LayerNorm.
# The authors mentioned another AdaNorm paper (https://openreview.net/pdf?id=HyxndNrxLB) as they introduce AdaLN.
# Did they use AdaNorm inside AdaLN? (as follows)
h = self.c * (1 - (self.k * h).detach()) * h
y = logγ.exp() * h + β
return y
class PrenormResidual(nn.Module):
def __init__(
self,
block,
d_model,
p_dropout,
requires_mask=False,
norm_type="ln",
n_levels: int | None = None,
):
super().__init__()
self.block = block
self.requires_mask = requires_mask
self.norm_type = norm_type
if norm_type == "ln":
self.norm = nn.LayerNorm(d_model)
elif norm_type == "adaln":
assert n_levels is not None
self.norm = AdaLN(d_model, n_levels)
else:
raise NotImplementedError(norm_type)
self.dropout = nn.Dropout(p_dropout)
def forward(self, x, m, l):
"""
Args:
x: input (b t d)
m: mask (b t 1), 1 is valuable and 0 is padding
l: level to use, required only for AdaLN
"""
nopts = {"l": l} if self.norm_type == "adaln" else {}
bopts = {"m": m} if self.requires_mask else {}
x = x + self.dropout(self.block(self.norm(x, **nopts) * m, **bopts))
return x * m
class Block(nn.Sequential):
def __init__(self, d_model, n_heads, p_dropout, casual, norm_type, n_levels):
super().__init__()
self.attn = PrenormResidual(
Attention(d_model, n_heads, casual),
d_model=d_model,
p_dropout=p_dropout,
requires_mask=True,
norm_type=norm_type,
n_levels=n_levels,
)
self.ffn = PrenormResidual(
nn.Sequential(
nn.Linear(d_model, d_model * 4),
nn.GELU(),
nn.Dropout(p_dropout),
nn.Linear(d_model * 4, d_model),
),
d_model=d_model,
p_dropout=p_dropout,
norm_type=norm_type,
n_levels=n_levels,
)
def forward(self, x, m, l):
"""
Args:
x: (b t c)
m: (b t 1)
l: (b)
"""
poor_in_vram = True
if x.requires_grad and poor_in_vram:
x = checkpoint(self.attn, x, m, l)
else:
x = self.attn(x, m, l)
x = self.ffn(x, m, l)
return x
class Embedding(nn.Embedding):
def forward(self, x_list: list[Tensor]) -> list[Tensor]:
if len(x_list) == 0:
return []
return super().forward(torch.cat(x_list)).split([*map(len, x_list)])
class MultiEmbedding(nn.Module):
"""
This embedding sums embeddings on different levels.
"""
def __init__(self, max_n_levels, n_tokens, token_dim):
super().__init__()
self.max_n_levels = max_n_levels
self.n_tokens = n_tokens
self.weight = nn.Parameter(torch.randn(max_n_levels, n_tokens, token_dim))
def forward(self, x_list: list[Tensor]) -> list[Tensor]:
if len(x_list) == 0:
return []
w = self.weight
padded_x_list = []
for xi in x_list:
xi = F.one_hot(xi, num_classes=self.n_tokens) # t l' k
xi = F.pad(xi, (0, 0, 0, w.shape[0] - xi.shape[1])) # t l k
padded_x_list.append(xi.to(w))
x = torch.cat(padded_x_list) # n l k
x = einsum("l k d, n l k -> n d", w, x)
x_list = x.split([*map(len, x_list)])
return x_list
def _join(x: tuple[Tensor], sep: Tensor):
"""
Args:
x: (k t d)
sep: (d)
"""
ret = x[0]
for i in range(1, len(x)):
ret = torch.cat((ret, sep[None], x[i]), dim=0)
return ret
class Base(nn.Module):
@property
def casual(self) -> bool:
raise NotImplementedError
@property
def n_resp_levels(self) -> int:
raise NotImplementedError
@property
def use_stop_token(self) -> bool:
raise NotImplementedError
@property
def norm_type(self):
raise NotImplementedError
@property
def n_prom_levels(self) -> int:
return 8
@property
def resp_loss_only(self):
raise NotImplementedError
def __init__(
self,
n_tokens: int,
d_model: int = 512,
n_heads: int = 8,
n_layers: int = 12,
p_dropout: float = 0.1,
):
super().__init__()
self.n_tokens = n_tokens
casual = self.casual
# +1 to include the stop token
n_stop_tokens = 1 if self.use_stop_token else 0
n_resp_tokens = n_tokens + n_stop_tokens
self.text_emb = Embedding(n_tokens, d_model)
# Here I simply use all prom levels
self.proms_emb = MultiEmbedding(self.n_prom_levels, n_tokens, d_model)
self.resps_emb = MultiEmbedding(self.n_resp_levels, n_resp_tokens, d_model)
self.sin_emb = SinusodialEmbedding(d_model)
self.sep = nn.Parameter(torch.randn(d_model))
blocks = [
Block(
d_model=d_model,
n_heads=n_heads,
p_dropout=p_dropout,
casual=casual,
norm_type=self.norm_type,
n_levels=self.n_resp_levels,
)
for _ in range(n_layers)
]
self.blocks = nn.ModuleList(blocks)
self.classifier = nn.Linear(d_model, n_resp_tokens)
@property
def stop_token(self):
if not self.use_stop_token:
raise ValueError("Not using stop token!")
return self.n_tokens
@property
def ignore_index(self):
return -100
@staticmethod
def _samplewise_merge_tensors(*l, sep: Tensor | None):
if sep is None:
cat = torch.cat
else:
cat = partial(_join, sep=sep)
return [*map(cat, zip(*l))]
@overload
def forward(
self,
text_list: list[Tensor],
proms_list: list[Tensor],
resps_list: list[Tensor],
targ_list: list[Tensor] | None = None,
quant_levels: Tensor | None = None,
shift_targ_list: bool = False,
return_all_resp: Literal[False] = False,
sampling_temperature: float = 1.0,
) -> Tensor:
...
@overload
def forward(
self,
text_list: list[Tensor],
proms_list: list[Tensor],
resps_list: list[Tensor],
targ_list: list[Tensor] | None = None,
quant_levels: Tensor | None = None,
shift_targ_list: bool = False,
return_all_resp: Literal[True] = True,
sampling_temperature: float = 1.0,
) -> list[Tensor]:
...
def forward(
self,
text_list: list[Tensor],
proms_list: list[Tensor],
resps_list: list[Tensor],
targ_list: list[Tensor] | None = None,
quant_levels: Tensor | None = None,
shift_targ_list: bool = False,
return_all_resp: bool = False,
sampling_temperature: float = 1.0,
):
"""
Args:
text_list: [t] * b
proms_list: [t' l] * b, l quantization levels.
resps_list: [t'' l] * b, l quantization levels.
targ_list: [t''] * b, one quantization level only, when given, loss will be computed
quant_levels: specify which quant_levels to feed forward, used in NAR mode.
shift_targ_list: whether to shift target list when computing loss. True if AR.
return_all_resp: True if NAR.
sampling_temperature: a lower temperature makes the result more robust but less diverse.
Returns:
            y: sampled tokens; a stacked tensor in AR mode, or a list of per-sample tensors when return_all_resp is True (NAR mode).
"""
x_list = self._samplewise_merge_tensors(
self.text_emb(text_list),
self.proms_emb(proms_list),
self.resps_emb(resps_list),
sep=self.sep,
)
x, m = list_to_tensor(x_list)
x = self.sin_emb.add_pe(x)
for block in self.blocks:
x = block(x, m, quant_levels)
h = self.classifier(x) * m
# Remove padding
h_list = [hi[:li] for hi, li in zip(h, map(len, x_list))]
if targ_list is not None:
if any([l == 0 for l in map(len, targ_list)]):
raise ValueError("Cannot compute loss given empty targ_list.")
device = h.device
ignore_sep = torch.tensor(self.ignore_index, device=device)
# Ignore prom in the target
prom_list = [
torch.full_like(t[..., 0], self.ignore_index) for t in proms_list
]
text_prom_list = self._samplewise_merge_tensors(
text_list, prom_list, sep=ignore_sep
)
            # If loss is computed on the responses only, set all text/prom targets to the ignore index
            # Otherwise, shift tokens one step earlier so that each position predicts the next (unknown) token
for i in range(len(text_prom_list)):
if self.resp_loss_only:
text_prom_list[i][:] = self.ignore_index
else:
text_prom_list[i] = text_prom_list[i].roll(-1, dims=0)
text_prom_list[i][-1] = self.ignore_index
if shift_targ_list:
# Also make target earlier if in autoregressive mode
targ_list = [*targ_list]
for i in range(len(targ_list)):
targ_list[i] = targ_list[i].roll(-1, dims=0)
targ_list[i][-1] = self.stop_token
y_list = self._samplewise_merge_tensors(
text_prom_list, targ_list, sep=ignore_sep
)
self.loss = dict(
nll=F.cross_entropy(
torch.cat(h_list),
torch.cat(y_list),
ignore_index=self.ignore_index,
)
)
if return_all_resp:
logits = [hi[-li:] for hi, li in zip(h_list, map(len, resps_list))]
ret = [
Categorical(logits=hi / sampling_temperature).sample() for hi in logits
]
else:
logits = torch.stack([hi[-1] for hi in h_list])
ret = Categorical(logits=logits / sampling_temperature).sample()
return ret
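# Minimal sketch (added for illustration; the values below are made up): the AR target
# shift performed above when shift_targ_list=True. Targets are rolled one step earlier
# and the final position becomes the stop token, so position i is trained to predict
# token i + 1. The function is defined only as documentation and is never called.
def _ar_target_shift_demo(stop_token: int = 1024):
    targ = torch.tensor([7, 8, 9])
    shifted = targ.roll(-1, dims=0)
    shifted[-1] = stop_token
    assert shifted.tolist() == [8, 9, stop_token]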
| EXA-1-master | exa/models/valle/vall-e-main 2/vall_e/vall_e/base.py |
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio_img'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio_inpaint'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'audio_detection'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mono2binaural'))
import gradio as gr
import matplotlib
import librosa
import torch
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import re
import uuid
import soundfile
from PIL import Image
import numpy as np
from omegaconf import OmegaConf
from einops import repeat
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.models.diffusion.ddim import DDIMSampler
import whisper
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import scipy.io.wavfile as wavfile
from audio_infer.utils import config as detection_config
from audio_infer.pytorch.models import PVT
import clip
AUDIO_CHATGPT_PREFIX = """AudioGPT
AudioGPT cannot directly read audios, but it has a list of tools to finish different speech, audio, and singing voice tasks. Each audio will have a file name formed as "audio/xxx.wav". When talking about audios, AudioGPT is very strict about the file name and will never fabricate nonexistent files.
AudioGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the audio content and audio file name. It will remember to provide the file name from the last tool observation, if a new audio is generated.
Human may provide new audios to AudioGPT with a description. The description helps AudioGPT to understand this audio, but AudioGPT should use tools to finish the following tasks, rather than directly imagining from the description.
Overall, AudioGPT is a powerful audio dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on many topics.
TOOLS:
------
AudioGPT has access to the following tools:"""
AUDIO_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
AUDIO_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if not exists.
You will remember to provide the audio file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Thought: Do I need to use a tool? {agent_scratchpad}"""
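# Illustrative sketch (added for clarity; the conversation snippets and file name are
# made up): the prefix/format-instructions/suffix above are later handed to
# initialize_agent via agent_kwargs, and the suffix carries the runtime placeholders
# that the agent fills in on every turn. This helper only shows the substitution and
# is never called by the app.
def _prompt_placeholder_demo() -> str:
    rendered = AUDIO_CHATGPT_SUFFIX.format(
        chat_history="Human: hi\nAI: Hello!",
        input="Transcribe audio/abc123.wav",
        agent_scratchpad="",
    )
    assert "Transcribe audio/abc123.wav" in rendered
    return rendered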
def cut_dialogue_history(history_memory, keep_last_n_words = 500):
tokens = history_memory.split()
n_tokens = len(tokens)
print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
if n_tokens < keep_last_n_words:
return history_memory
else:
paragraphs = history_memory.split('\n')
last_n_tokens = n_tokens
while last_n_tokens >= keep_last_n_words:
last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
paragraphs = paragraphs[1:]
return '\n' + '\n'.join(paragraphs)
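# Usage sketch (an illustrative addition; the toy history below is made up):
# cut_dialogue_history drops the oldest newline-separated paragraphs until the
# whitespace-token count falls under keep_last_n_words, keeping the most recent turns.
# The function is never called here.
def _cut_dialogue_history_demo():
    history = "\n".join(f"turn {i}: " + "word " * 50 for i in range(20))
    trimmed = cut_dialogue_history(history, keep_last_n_words=100)
    assert len(trimmed.split()) <= len(history.split())
    assert trimmed.splitlines()[-1] == history.splitlines()[-1]  # latest turn is preserved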
def merge_audio(audio_path_1, audio_path_2):
merged_signal = []
sr_1, signal_1 = wavfile.read(audio_path_1)
sr_2, signal_2 = wavfile.read(audio_path_2)
merged_signal.append(signal_1)
merged_signal.append(signal_2)
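    # Note: the two signals are concatenated end-to-end (not mixed) and written out at the second file's sample rate.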
merged_signal = np.hstack(merged_signal)
merged_signal = np.asarray(merged_signal, dtype=np.int16)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, sr_2, merged_signal)
return audio_filename
class T2I:
def __init__(self, device):
from transformers import AutoModelForCausalLM, AutoTokenizer
from diffusers import StableDiffusionPipeline
from transformers import pipeline
print("Initializing T2I to %s" % device)
self.device = device
self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
self.pipe.to(device)
def inference(self, text):
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
print(f'{text} refined to {refined_text}')
image = self.pipe(refined_text).images[0]
image.save(image_filename)
print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
return image_filename
class ImageCaptioning:
def __init__(self, device):
from transformers import BlipProcessor, BlipForConditionalGeneration
print("Initializing ImageCaptioning to %s" % device)
self.device = device
self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
def inference(self, image_path):
inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
out = self.model.generate(**inputs)
captions = self.processor.decode(out[0], skip_special_tokens=True)
return captions
class T2A:
def __init__(self, device):
print("Initializing Make-An-Audio to %s" % device)
self.device = device
self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/text_to_audio/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta40multi_epoch=000085.ckpt', device=device)
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
def _initialize_model(self, config, ckpt, device):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
model = model.to(device)
model.cond_stage_model.to(model.device)
model.cond_stage_model.device = model.device
sampler = DDIMSampler(model)
return sampler
def txt2audio(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
SAMPLE_RATE = 16000
prng = np.random.RandomState(seed)
start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
c = self.sampler.model.get_learned_conditioning(n_samples * [text])
shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x)
samples_ddim, _ = self.sampler.sample(S = ddim_steps,
conditioning = c,
batch_size = n_samples,
shape = shape,
verbose = False,
unconditional_guidance_scale = scale,
unconditional_conditioning = uc,
x_T = start_code)
x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
wav_list = []
for idx,spec in enumerate(x_samples_ddim):
wav = self.vocoder.vocode(spec)
wav_list.append((SAMPLE_RATE,wav))
best_wav = self.select_best_audio(text, wav_list)
return best_wav
def select_best_audio(self, prompt, wav_list):
from wav_evaluation.models.CLAPWrapper import CLAPWrapper
clap_model = CLAPWrapper('useful_ckpts/CLAP/CLAP_weights_2022.pth', 'useful_ckpts/CLAP/config.yml',
use_cuda=torch.cuda.is_available())
text_embeddings = clap_model.get_text_embeddings([prompt])
score_list = []
for data in wav_list:
sr, wav = data
audio_embeddings = clap_model.get_audio_embeddings([(torch.FloatTensor(wav), sr)], resample=True)
score = clap_model.compute_similarity(audio_embeddings, text_embeddings,
use_logit_scale=False).squeeze().cpu().numpy()
score_list.append(score)
max_index = np.array(score_list).argmax()
print(score_list, max_index)
return wav_list[max_index]
def inference(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
melbins,mel_len = 80,624
with torch.no_grad():
result = self.txt2audio(
text = text,
H = melbins,
W = mel_len
)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, result[1], samplerate = 16000)
        print(f"Processed T2A.run, text: {text}, audio_filename: {audio_filename}")
return audio_filename
class I2A:
def __init__(self, device):
print("Initializing Make-An-Audio-Image to %s" % device)
self.device = device
self.sampler = self._initialize_model('text_to_audio/Make_An_Audio_img/configs/img_to_audio/img2audio_args.yaml', 'text_to_audio/Make_An_Audio_img/useful_ckpts/ta54_epoch=000216.ckpt', device=device)
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio_img/vocoder/logs/bigv16k53w',device=device)
def _initialize_model(self, config, ckpt, device):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
model = model.to(device)
model.cond_stage_model.to(model.device)
model.cond_stage_model.device = model.device
sampler = DDIMSampler(model)
return sampler
def img2audio(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
SAMPLE_RATE = 16000
n_samples = 1 # only support 1 sample
prng = np.random.RandomState(seed)
start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
#image = Image.fromarray(image)
image = Image.open(image)
image = self.sampler.model.cond_stage_model.preprocess(image).unsqueeze(0)
image_embedding = self.sampler.model.cond_stage_model.forward_img(image)
c = image_embedding.repeat(n_samples, 1, 1)
shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x)
samples_ddim, _ = self.sampler.sample(S=ddim_steps,
conditioning=c,
batch_size=n_samples,
shape=shape,
verbose=False,
unconditional_guidance_scale=scale,
unconditional_conditioning=uc,
x_T=start_code)
x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
wav_list = []
for idx,spec in enumerate(x_samples_ddim):
wav = self.vocoder.vocode(spec)
wav_list.append((SAMPLE_RATE,wav))
best_wav = wav_list[0]
return best_wav
def inference(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
melbins,mel_len = 80,624
with torch.no_grad():
result = self.img2audio(
image=image,
H=melbins,
W=mel_len
)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, result[1], samplerate = 16000)
        print(f"Processed I2A.run, image_filename: {image}, audio_filename: {audio_filename}")
return audio_filename
class TTS:
def __init__(self, device=None):
from inference.tts.PortaSpeech import TTSInference
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing PortaSpeech to %s" % device)
self.device = device
self.exp_name = 'checkpoints/ps_adv_baseline'
self.set_model_hparams()
self.inferencer = TTSInference(self.hp, device)
def set_model_hparams(self):
set_hparams(exp_name=self.exp_name, print_hparams=False)
self.hp = hp
def inference(self, text):
self.set_model_hparams()
inp = {"text": text}
out = self.inferencer.infer_once(inp)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, out, samplerate=22050)
return audio_filename
class T2S:
def __init__(self, device= None):
from inference.svs.ds_e2e import DiffSingerE2EInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing DiffSinger to %s" % device)
self.device = device
self.exp_name = 'checkpoints/0831_opencpop_ds1000'
self.config= 'NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml'
self.set_model_hparams()
self.pipe = DiffSingerE2EInfer(self.hp, device)
self.default_inp = {
'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP',
'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest',
'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590'
}
def set_model_hparams(self):
set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
self.hp = hp
def inference(self, inputs):
self.set_model_hparams()
val = inputs.split(",")
key = ['text', 'notes', 'notes_duration']
try:
inp = {k: v for k, v in zip(key, val)}
wav = self.pipe.infer_once(inp)
        except Exception:
print('Error occurs. Generate default audio sample.\n')
inp = self.default_inp
wav = self.pipe.infer_once(inp)
#if inputs == '' or len(val) < len(key):
# inp = self.default_inp
#else:
# inp = {k:v for k,v in zip(key,val)}
#wav = self.pipe.infer_once(inp)
wav *= 32767
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
print(f"Processed T2S.run, audio_filename: {audio_filename}")
return audio_filename
class t2s_VISinger:
def __init__(self, device=None):
        from espnet2.bin.svs_inference import SingingGenerate
        from espnet2.utils.types import str_or_none  # needed by str_or_none(tag) below
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing VISinger to %s" % device)
tag = 'AQuarterMile/opencpop_visinger1'
self.model = SingingGenerate.from_pretrained(
model_tag=str_or_none(tag),
device=device,
)
phn_dur = [[0. , 0.219 ],
[0.219 , 0.50599998],
[0.50599998, 0.71399999],
[0.71399999, 1.097 ],
[1.097 , 1.28799999],
[1.28799999, 1.98300004],
[1.98300004, 7.10500002],
[7.10500002, 7.60400009]]
phn = ['sh', 'i', 'q', 'v', 'n', 'i', 'SP', 'AP']
score = [[0, 0.50625, 'sh_i', 58, 'sh_i'], [0.50625, 1.09728, 'q_v', 56, 'q_v'], [1.09728, 1.9832100000000001, 'n_i', 53, 'n_i'], [1.9832100000000001, 7.105360000000001, 'SP', 0, 'SP'], [7.105360000000001, 7.604390000000001, 'AP', 0, 'AP']]
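        # Each score entry appears to be [start_sec, end_sec, lyric, MIDI pitch, phonemes]; this reading is an assumption inferred from the values above and the label/tempo fields below.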
tempo = 70
tmp = {}
tmp["label"] = phn_dur, phn
tmp["score"] = tempo, score
self.default_inp = tmp
def inference(self, inputs):
val = inputs.split(",")
key = ['text', 'notes', 'notes_duration']
        try:  # TODO: input format will be updated
inp = {k: v for k, v in zip(key, val)}
wav = self.model(text=inp)["wav"]
        except Exception:
print('Error occurs. Generate default audio sample.\n')
inp = self.default_inp
wav = self.model(text=inp)["wav"]
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, wav, samplerate=self.model.fs)
return audio_filename
class TTS_OOD:
def __init__(self, device):
from inference.tts.GenerSpeech import GenerSpeechInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing GenerSpeech to %s" % device)
self.device = device
self.exp_name = 'checkpoints/GenerSpeech'
self.config = 'NeuralSeq/modules/GenerSpeech/config/generspeech.yaml'
self.set_model_hparams()
self.pipe = GenerSpeechInfer(self.hp, device)
def set_model_hparams(self):
set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
f0_stats_fn = f'{hp["binary_data_dir"]}/train_f0s_mean_std.npy'
if os.path.exists(f0_stats_fn):
hp['f0_mean'], hp['f0_std'] = np.load(f0_stats_fn)
hp['f0_mean'] = float(hp['f0_mean'])
hp['f0_std'] = float(hp['f0_std'])
hp['emotion_encoder_path'] = 'checkpoints/Emotion_encoder.pt'
self.hp = hp
def inference(self, inputs):
self.set_model_hparams()
key = ['ref_audio', 'text']
val = inputs.split(",")
inp = {k: v for k, v in zip(key, val)}
wav = self.pipe.infer_once(inp)
wav *= 32767
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
print(
f"Processed GenerSpeech.run. Input text:{val[1]}. Input reference audio: {val[0]}. Output Audio_filename: {audio_filename}")
return audio_filename
class Inpaint:
def __init__(self, device):
print("Initializing Make-An-Audio-inpaint to %s" % device)
self.device = device
self.sampler = self._initialize_model_inpaint('text_to_audio/Make_An_Audio_inpaint/configs/inpaint/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio_inpaint/useful_ckpts/inpaint7_epoch00047.ckpt')
self.vocoder = VocoderBigVGAN('./vocoder/logs/bigv16k53w',device=device)
self.cmap_transform = matplotlib.cm.viridis
def _initialize_model_inpaint(self, config, ckpt):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = model.to(device)
print(model.device, device, model.cond_stage_model.device)
sampler = DDIMSampler(model)
return sampler
def make_batch_sd(self, mel, mask, num_samples=1):
mel = torch.from_numpy(mel)[None,None,...].to(dtype=torch.float32)
mask = torch.from_numpy(mask)[None,None,...].to(dtype=torch.float32)
masked_mel = (1 - mask) * mel
mel = mel * 2 - 1
mask = mask * 2 - 1
masked_mel = masked_mel * 2 -1
batch = {
"mel": repeat(mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
"mask": repeat(mask.to(device=self.device), "1 ... -> n ...", n=num_samples),
"masked_mel": repeat(masked_mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
}
return batch
def gen_mel(self, input_audio_path):
SAMPLE_RATE = 16000
sr, ori_wav = wavfile.read(input_audio_path)
print("gen_mel")
print(sr,ori_wav.shape,ori_wav)
ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
if len(ori_wav.shape)==2:# stereo
ori_wav = librosa.to_mono(ori_wav.T)
print(sr,ori_wav.shape,ori_wav)
ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
mel_len,hop_size = 848,256
input_len = mel_len * hop_size
if len(ori_wav) < input_len:
input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
else:
input_wav = ori_wav[:input_len]
mel = TRANSFORMS_16000(input_wav)
return mel
def gen_mel_audio(self, input_audio):
SAMPLE_RATE = 16000
sr,ori_wav = input_audio
print("gen_mel_audio")
print(sr,ori_wav.shape,ori_wav)
ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
if len(ori_wav.shape)==2:# stereo
ori_wav = librosa.to_mono(ori_wav.T)
print(sr,ori_wav.shape,ori_wav)
ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
mel_len,hop_size = 848,256
input_len = mel_len * hop_size
if len(ori_wav) < input_len:
input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
else:
input_wav = ori_wav[:input_len]
mel = TRANSFORMS_16000(input_wav)
return mel
def show_mel_fn(self, input_audio_path):
crop_len = 500
crop_mel = self.gen_mel(input_audio_path)[:,:crop_len]
color_mel = self.cmap_transform(crop_mel)
image = Image.fromarray((color_mel*255).astype(np.uint8))
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
image.save(image_filename)
return image_filename
def inpaint(self, batch, seed, ddim_steps, num_samples=1, W=512, H=512):
model = self.sampler.model
prng = np.random.RandomState(seed)
start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"]))
cc = torch.nn.functional.interpolate(batch["mask"],
size=c.shape[-2:])
c = torch.cat((c, cc), dim=1) # (b,c+1,h,w) 1 is mask
shape = (c.shape[1]-1,)+c.shape[2:]
samples_ddim, _ = self.sampler.sample(S=ddim_steps,
conditioning=c,
batch_size=c.shape[0],
shape=shape,
verbose=False)
x_samples_ddim = model.decode_first_stage(samples_ddim)
mel = torch.clamp((batch["mel"]+1.0)/2.0,min=0.0, max=1.0)
mask = torch.clamp((batch["mask"]+1.0)/2.0,min=0.0, max=1.0)
predicted_mel = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0)
inpainted = (1-mask)*mel+mask*predicted_mel
inpainted = inpainted.cpu().numpy().squeeze()
        inpaint_wav = self.vocoder.vocode(inpainted)
        return inpainted, inpaint_wav
def inference(self, input_audio, mel_and_mask, seed = 55, ddim_steps = 100):
SAMPLE_RATE = 16000
torch.set_grad_enabled(False)
mel_img = Image.open(mel_and_mask['image'])
mask_img = Image.open(mel_and_mask["mask"])
show_mel = np.array(mel_img.convert("L"))/255
mask = np.array(mask_img.convert("L"))/255
mel_bins,mel_len = 80,848
input_mel = self.gen_mel_audio(input_audio)[:,:mel_len]
mask = np.pad(mask,((0,0),(0,mel_len-mask.shape[1])),mode='constant',constant_values=0)
print(mask.shape,input_mel.shape)
with torch.no_grad():
batch = self.make_batch_sd(input_mel,mask,num_samples=1)
inpainted,gen_wav = self.inpaint(
batch=batch,
seed=seed,
ddim_steps=ddim_steps,
num_samples=1,
H=mel_bins, W=mel_len
)
inpainted = inpainted[:,:show_mel.shape[1]]
color_mel = self.cmap_transform(inpainted)
input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0])
gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len]
image = Image.fromarray((color_mel*255).astype(np.uint8))
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
image.save(image_filename)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, gen_wav, samplerate = 16000)
return image_filename, audio_filename
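# Illustrative driver sketch (an addition, not original code): shows the expected call
# order for the Inpaint tool. The argument names, the example path, and the assumption
# that the mask comes from a painted spectrogram image are hypothetical; the function
# is defined purely as documentation and is never called.
def _inpaint_flow_demo(inpainter: Inpaint, input_audio, mel_and_mask):
    # 1) Render the input audio's (cropped) mel spectrogram as a PNG the user can paint a mask on.
    mel_png = inpainter.show_mel_fn("audio/example.wav")  # hypothetical path
    # 2) input_audio is an (sr, waveform) tuple; mel_and_mask is a dict holding the
    #    "image" and "mask" file paths for the painted spectrogram.
    image_filename, audio_filename = inpainter.inference(input_audio, mel_and_mask)
    return mel_png, image_filename, audio_filename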
class ASR:
def __init__(self, device):
print("Initializing Whisper to %s" % device)
self.device = device
self.model = whisper.load_model("base", device=device)
def inference(self, audio_path):
audio = whisper.load_audio(audio_path)
audio = whisper.pad_or_trim(audio)
mel = whisper.log_mel_spectrogram(audio).to(self.device)
_, probs = self.model.detect_language(mel)
options = whisper.DecodingOptions()
result = whisper.decode(self.model, mel, options)
return result.text
def translate_english(self, audio_path):
audio = self.model.transcribe(audio_path, language='English')
return audio['text']
class A2T:
def __init__(self, device):
from audio_to_text.inference_waveform import AudioCapModel
print("Initializing Audio-To-Text Model to %s" % device)
self.device = device
self.model = AudioCapModel("audio_to_text/audiocaps_cntrstv_cnn14rnn_trm")
def inference(self, audio_path):
audio = whisper.load_audio(audio_path)
caption_text = self.model(audio)
return caption_text[0]
class GeneFace:
def __init__(self, device=None):
print("Initializing GeneFace model to %s" % device)
from audio_to_face.GeneFace_binding import GeneFaceInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
self.geneface_model = GeneFaceInfer(device)
print("Loaded GeneFace model")
def inference(self, audio_path):
audio_base_name = os.path.basename(audio_path)[:-4]
out_video_name = audio_path.replace("audio","video").replace(".wav", ".mp4")
inp = {
'audio_source_name': audio_path,
'out_npy_name': f'geneface/tmp/{audio_base_name}.npy',
'cond_name': f'geneface/tmp/{audio_base_name}.npy',
'out_video_name': out_video_name,
'tmp_imgs_dir': f'video/tmp_imgs',
}
self.geneface_model.infer_once(inp)
return out_video_name
class SoundDetection:
def __init__(self, device):
self.device = device
self.sample_rate = 32000
self.window_size = 1024
self.hop_size = 320
self.mel_bins = 64
self.fmin = 50
self.fmax = 14000
self.model_type = 'PVT'
self.checkpoint_path = 'audio_detection/audio_infer/useful_ckpts/audio_detection.pth'
self.classes_num = detection_config.classes_num
self.labels = detection_config.labels
self.frames_per_second = self.sample_rate // self.hop_size
# Model = eval(self.model_type)
self.model = PVT(sample_rate=self.sample_rate, window_size=self.window_size,
hop_size=self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax,
classes_num=self.classes_num)
checkpoint = torch.load(self.checkpoint_path, map_location=self.device)
self.model.load_state_dict(checkpoint['model'])
self.model.to(device)
def inference(self, audio_path):
# Forward
(waveform, _) = librosa.core.load(audio_path, sr=self.sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = torch.from_numpy(waveform)
waveform = waveform.to(self.device)
# Forward
with torch.no_grad():
self.model.eval()
batch_output_dict = self.model(waveform, None)
framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
"""(time_steps, classes_num)"""
# print('Sound event detection result (time_steps x classes_num): {}'.format(
# framewise_output.shape))
        import matplotlib.pyplot as plt
sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
top_k = 10 # Show top results
top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]
"""(time_steps, top_k)"""
# Plot result
stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=self.window_size,
hop_length=self.hop_size, window='hann', center=True)
frames_num = stft.shape[-1]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
axs[0].set_ylabel('Frequency bins')
axs[0].set_title('Log spectrogram')
axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
axs[1].xaxis.set_ticks(np.arange(0, frames_num, self.frames_per_second))
axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / self.frames_per_second))
axs[1].yaxis.set_ticks(np.arange(0, top_k))
axs[1].yaxis.set_ticklabels(np.array(self.labels)[sorted_indexes[0 : top_k]])
axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
axs[1].set_xlabel('Seconds')
axs[1].xaxis.set_ticks_position('bottom')
plt.tight_layout()
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
plt.savefig(image_filename)
return image_filename
class SoundExtraction:
def __init__(self, device):
from sound_extraction.model.LASSNet import LASSNet
from sound_extraction.utils.stft import STFT
import torch.nn as nn
self.device = device
self.model_file = 'sound_extraction/useful_ckpts/LASSNet.pt'
self.stft = STFT()
self.model = nn.DataParallel(LASSNet(device)).to(device)
checkpoint = torch.load(self.model_file)
self.model.load_state_dict(checkpoint['model'])
self.model.eval()
def inference(self, inputs):
#key = ['ref_audio', 'text']
from sound_extraction.utils.wav_io import load_wav, save_wav
val = inputs.split(",")
audio_path = val[0] # audio_path, text
text = val[1]
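        # Expected input: a comma-separated "audio_path,text" string, as described in the "Extract Sound Event From Mixture Audio Based On Language Description" tool registration further below.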
waveform = load_wav(audio_path)
waveform = torch.tensor(waveform).transpose(1,0)
mixed_mag, mixed_phase = self.stft.transform(waveform)
text_query = ['[CLS] ' + text]
mixed_mag = mixed_mag.transpose(2,1).unsqueeze(0).to(self.device)
est_mask = self.model(mixed_mag, text_query)
est_mag = est_mask * mixed_mag
est_mag = est_mag.squeeze(1)
est_mag = est_mag.permute(0, 2, 1)
est_wav = self.stft.inverse(est_mag.cpu().detach(), mixed_phase)
est_wav = est_wav.squeeze(0).squeeze(0).numpy()
#est_path = f'output/est{i}.wav'
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
print('audio_filename ', audio_filename)
save_wav(est_wav, audio_filename)
return audio_filename
class Binaural:
def __init__(self, device):
from src.models import BinauralNetwork
self.device = device
self.model_file = 'mono2binaural/useful_ckpts/m2b/binaural_network.net'
self.position_file = ['mono2binaural/useful_ckpts/m2b/tx_positions.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions2.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions3.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions4.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions5.txt']
self.net = BinauralNetwork(view_dim=7,
warpnet_layers=4,
warpnet_channels=64,
)
self.net.load_from_file(self.model_file)
self.sr = 48000
def inference(self, audio_path):
mono, sr = librosa.load(path=audio_path, sr=self.sr, mono=True)
mono = torch.from_numpy(mono)
mono = mono.unsqueeze(0)
        import random
rand_int = random.randint(0,4)
view = np.loadtxt(self.position_file[rand_int]).transpose().astype(np.float32)
view = torch.from_numpy(view)
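        # The position files appear to provide one position row per 400 audio samples at 48 kHz (an inference from the checks below), so mono and view are trimmed to keep that 400:1 alignment.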
if not view.shape[-1] * 400 == mono.shape[-1]:
mono = mono[:,:(mono.shape[-1]//400)*400] #
if view.shape[1]*400 > mono.shape[1]:
m_a = view.shape[1] - mono.shape[-1]//400
rand_st = random.randint(0,m_a)
view = view[:,m_a:m_a+(mono.shape[-1]//400)] #
# binauralize and save output
self.net.eval().to(self.device)
mono, view = mono.to(self.device), view.to(self.device)
chunk_size = 48000 # forward in chunks of 1s
rec_field = 1000 # add 1000 samples as "safe bet" since warping has undefined rec. field
rec_field -= rec_field % 400 # make sure rec_field is a multiple of 400 to match audio and view frequencies
chunks = [
{
"mono": mono[:, max(0, i-rec_field):i+chunk_size],
"view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400]
}
for i in range(0, mono.shape[-1], chunk_size)
]
for i, chunk in enumerate(chunks):
with torch.no_grad():
mono = chunk["mono"].unsqueeze(0)
view = chunk["view"].unsqueeze(0)
binaural = self.net(mono, view).squeeze(0)
if i > 0:
binaural = binaural[:, -(mono.shape[-1]-rec_field):]
chunk["binaural"] = binaural
binaural = torch.cat([chunk["binaural"] for chunk in chunks], dim=-1)
binaural = torch.clamp(binaural, min=-1, max=1).cpu()
#binaural = chunked_forwarding(net, mono, view)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
import torchaudio
torchaudio.save(audio_filename, binaural, sr)
#soundfile.write(audio_filename, binaural, samplerate = 48000)
print(f"Processed Binaural.run, audio_filename: {audio_filename}")
return audio_filename
class TargetSoundDetection:
def __init__(self, device):
from target_sound_detection.src import models as tsd_models
from target_sound_detection.src.models import event_labels
self.device = device
self.MEL_ARGS = {
'n_mels': 64,
'n_fft': 2048,
'hop_length': int(22050 * 20 / 1000),
'win_length': int(22050 * 40 / 1000)
}
self.EPS = np.spacing(1)
self.clip_model, _ = clip.load("ViT-B/32", device=self.device)
self.event_labels = event_labels
self.id_to_event = {i : label for i, label in enumerate(self.event_labels)}
config = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth', map_location='cpu')
config_parameters = dict(config)
config_parameters['tao'] = 0.6
if 'thres' not in config_parameters.keys():
config_parameters['thres'] = 0.5
if 'time_resolution' not in config_parameters.keys():
config_parameters['time_resolution'] = 125
model_parameters = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'
, map_location=lambda storage, loc: storage) # load parameter
self.model = getattr(tsd_models, config_parameters['model'])(config_parameters,
inputdim=64, outputdim=2, time_resolution=config_parameters['time_resolution'], **config_parameters['model_args'])
self.model.load_state_dict(model_parameters)
self.model = self.model.to(self.device).eval()
self.re_embeds = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')
self.ref_mel = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')
def extract_feature(self, fname):
import soundfile as sf
y, sr = sf.read(fname, dtype='float32')
print('y ', y.shape)
ti = y.shape[0]/sr
if y.ndim > 1:
y = y.mean(1)
        y = librosa.resample(y, orig_sr=sr, target_sr=22050)
        lms_feature = np.log(librosa.feature.melspectrogram(y=y, **self.MEL_ARGS) + self.EPS).T
return lms_feature,ti
def build_clip(self, text):
text = clip.tokenize(text).to(self.device) # ["a diagram with dog", "a dog", "a cat"]
text_features = self.clip_model.encode_text(text)
return text_features
def cal_similarity(self, target, retrievals):
ans = []
#target =torch.from_numpy(target)
for name in retrievals.keys():
tmp = retrievals[name]
#tmp = torch.from_numpy(tmp)
s = torch.cosine_similarity(target.squeeze(), tmp.squeeze(), dim=0)
ans.append(s.item())
return ans.index(max(ans))
def inference(self, text, audio_path):
from target_sound_detection.src.utils import median_filter, decode_with_timestamps
target_emb = self.build_clip(text) # torch type
idx = self.cal_similarity(target_emb, self.re_embeds)
target_event = self.id_to_event[idx]
embedding = self.ref_mel[target_event]
embedding = torch.from_numpy(embedding)
embedding = embedding.unsqueeze(0).to(self.device).float()
#print('embedding ', embedding.shape)
inputs,ti = self.extract_feature(audio_path)
#print('ti ', ti)
inputs = torch.from_numpy(inputs)
inputs = inputs.unsqueeze(0).to(self.device).float()
#print('inputs ', inputs.shape)
decision, decision_up, logit = self.model(inputs, embedding)
pred = decision_up.detach().cpu().numpy()
pred = pred[:,:,0]
frame_num = decision_up.shape[1]
time_ratio = ti / frame_num
filtered_pred = median_filter(pred, window_size=1, threshold=0.5)
#print('filtered_pred ', filtered_pred)
time_predictions = []
for index_k in range(filtered_pred.shape[0]):
decoded_pred = []
decoded_pred_ = decode_with_timestamps(target_event, filtered_pred[index_k,:])
if len(decoded_pred_) == 0: # neg deal
decoded_pred_.append((target_event, 0, 0))
decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
cur_pred = pred[num_batch]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
# print(label_prediction)
for event_label, onset, offset in label_prediction:
time_predictions.append({
'onset': onset*time_ratio,
'offset': offset*time_ratio,})
ans = ''
for i,item in enumerate(time_predictions):
ans = ans + 'segment' + str(i+1) + ' start_time: ' + str(item['onset']) + ' end_time: ' + str(item['offset']) + '\t'
#print(ans)
return ans
# class Speech_Enh_SS_SC:
# """Speech Enhancement or Separation in single-channel
# Example usage:
# enh_model = Speech_Enh_SS("cuda")
# enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
# """
# def __init__(self, device="cuda", model_name="lichenda/chime4_fasnet_dprnn_tac"):
# self.model_name = model_name
# self.device = device
# print("Initializing ESPnet Enh to %s" % device)
# self._initialize_model()
# def _initialize_model(self):
# from espnet_model_zoo.downloader import ModelDownloader
# from espnet2.bin.enh_inference import SeparateSpeech
# d = ModelDownloader()
# cfg = d.download_and_unpack(self.model_name)
# self.separate_speech = SeparateSpeech(
# train_config=cfg["train_config"],
# model_file=cfg["model_file"],
# # for segment-wise process on long speech
# segment_size=2.4,
# hop_size=0.8,
# normalize_segment_scale=False,
# show_progressbar=True,
# ref_channel=None,
# normalize_output_wav=True,
# device=self.device,
# )
# def inference(self, speech_path, ref_channel=0):
# speech, sr = soundfile.read(speech_path)
# speech = speech[:, ref_channel]
# assert speech.dim() == 1
# enh_speech = self.separate_speech(speech[None, ], fs=sr)
# if len(enh_speech) == 1:
# return enh_speech[0]
# return enh_speech
# class Speech_Enh_SS_MC:
# """Speech Enhancement or Separation in multi-channel"""
# def __init__(self, device="cuda", model_name=None, ref_channel=4):
# self.model_name = model_name
# self.ref_channel = ref_channel
# self.device = device
# print("Initializing ESPnet Enh to %s" % device)
# self._initialize_model()
# def _initialize_model(self):
# from espnet_model_zoo.downloader import ModelDownloader
# from espnet2.bin.enh_inference import SeparateSpeech
# d = ModelDownloader()
# cfg = d.download_and_unpack(self.model_name)
# self.separate_speech = SeparateSpeech(
# train_config=cfg["train_config"],
# model_file=cfg["model_file"],
# # for segment-wise process on long speech
# segment_size=2.4,
# hop_size=0.8,
# normalize_segment_scale=False,
# show_progressbar=True,
# ref_channel=self.ref_channel,
# normalize_output_wav=True,
# device=self.device,
# )
# def inference(self, speech_path):
# speech, sr = soundfile.read(speech_path)
# speech = speech.T
# enh_speech = self.separate_speech(speech[None, ...], fs=sr)
# if len(enh_speech) == 1:
# return enh_speech[0]
# return enh_speech
class Speech_Enh_SS_SC:
"""Speech Enhancement or Separation in single-channel
Example usage:
    enh_model = Speech_Enh_SS_SC("cuda")
enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
"""
def __init__(self, device="cuda", model_name="espnet/Wangyou_Zhang_chime4_enh_train_enh_conv_tasnet_raw"):
self.model_name = model_name
self.device = device
print("Initializing ESPnet Enh to %s" % device)
self._initialize_model()
def _initialize_model(self):
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.enh_inference import SeparateSpeech
d = ModelDownloader()
cfg = d.download_and_unpack(self.model_name)
self.separate_speech = SeparateSpeech(
train_config=cfg["train_config"],
model_file=cfg["model_file"],
# for segment-wise process on long speech
segment_size=2.4,
hop_size=0.8,
normalize_segment_scale=False,
show_progressbar=True,
ref_channel=None,
normalize_output_wav=True,
device=self.device,
)
def inference(self, speech_path, ref_channel=0):
speech, sr = soundfile.read(speech_path)
speech = speech[:, ref_channel]
# speech = torch.from_numpy(speech)
# assert speech.dim() == 1
enh_speech = self.separate_speech(speech[None, ...], fs=sr)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# if len(enh_speech) == 1:
soundfile.write(audio_filename, enh_speech[0].squeeze(), samplerate=sr)
# return enh_speech[0]
# return enh_speech
# else:
# print("############")
# audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
# audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
# audio_filename = merge_audio(audio_filename_1, audio_filename_2)
return audio_filename
class Speech_SS:
def __init__(self, device="cuda", model_name="lichenda/wsj0_2mix_skim_noncausal"):
self.model_name = model_name
self.device = device
print("Initializing ESPnet SS to %s" % device)
self._initialize_model()
def _initialize_model(self):
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.enh_inference import SeparateSpeech
d = ModelDownloader()
cfg = d.download_and_unpack(self.model_name)
self.separate_speech = SeparateSpeech(
train_config=cfg["train_config"],
model_file=cfg["model_file"],
# for segment-wise process on long speech
segment_size=2.4,
hop_size=0.8,
normalize_segment_scale=False,
show_progressbar=True,
ref_channel=None,
normalize_output_wav=True,
device=self.device,
)
def inference(self, speech_path):
speech, sr = soundfile.read(speech_path)
enh_speech = self.separate_speech(speech[None, ...], fs=sr)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
if len(enh_speech) == 1:
soundfile.write(audio_filename, enh_speech[0], samplerate=sr)
else:
# print("############")
audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
audio_filename = merge_audio(audio_filename_1, audio_filename_2)
return audio_filename
class ConversationBot:
def __init__(self):
print("Initializing AudioGPT")
self.llm = OpenAI(temperature=0)
self.t2i = T2I(device="cuda:1")
self.i2t = ImageCaptioning(device="cuda:0")
self.t2a = T2A(device="cuda:0")
self.tts = TTS(device="cpu")
self.t2s = T2S(device="cpu")
self.i2a = I2A(device="cuda:0")
self.a2t = A2T(device="cpu")
self.asr = ASR(device="cuda:0")
self.SE_SS_SC = Speech_Enh_SS_SC(device="cuda:0")
# self.SE_SS_MC = Speech_Enh_SS_MC(device="cuda:0")
self.SS = Speech_SS(device="cuda:0")
self.inpaint = Inpaint(device="cuda:0")
self.tts_ood = TTS_OOD(device="cpu")
self.geneface = GeneFace(device="cuda:0")
self.detection = SoundDetection(device="cpu")
self.binaural = Binaural(device="cuda:0")
self.extraction = SoundExtraction(device="cuda:0")
self.TSD = TargetSoundDetection(device="cuda:0")
self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
def init_tools(self, interaction_type):
if interaction_type == 'text':
self.tools = [
Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
                 description="useful for when you want to generate an image from a user input text and save it to a file. Like: generate an image of an object or something, or generate an image that includes some objects. "
"The input to this tool should be a string, representing the text used to generate image. "),
Tool(name="Get Photo Description", func=self.i2t.inference,
description="useful for when you want to know what is inside the photo. receives image_path as input. "
"The input to this tool should be a string, representing the image_path. "),
Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
                 description="useful for when you want to generate an audio from a user input text and save it to a file."
"The input to this tool should be a string, representing the text used to generate audio."),
Tool(
name="Style Transfer", func= self.tts_ood.inference,
                 description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
                             "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
                             "The input to this tool should be a comma-separated string of two, representing reference audio path and input text."),
Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
                 description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
                             "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
                             "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
                             "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
                             "The input to this tool should be a comma-separated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
                 description="useful for when you want to convert a user input text into speech audio and save it to a file."
"The input to this tool should be a string, representing the text used to be converted to speech."),
# Tool(name="Speech Enhancement Or Separation In Single-Channel", func=self.SE_SS_SC.inference,
# description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), "
# "or separate each speech from the speech mixture (single-channel), receives audio_path as input."
# "The input to this tool should be a string, representing the audio_path."),
Tool(name="Speech Enhancement In Single-Channel", func=self.SE_SS_SC.inference,
description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Speech Separation In Single-Channel", func=self.SS.inference,
description="useful for when you want to separate each speech from the speech mixture, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
# Tool(name="Speech Enhancement In Multi-Channel", func=self.SE_SS_MC.inference,
# description="useful for when you want to enhance the quality of the speech signal by reducing background noise (multi-channel), receives audio_path as input."
# "The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate Audio From The Image", func=self.i2a.inference,
description="useful for when you want to generate an audio based on an image."
"The input to this tool should be a string, representing the image_path. "),
Tool(name="Generate Text From The Audio", func=self.a2t.inference,
description="useful for when you want to describe an audio in text, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Audio Inpainting", func=self.inpaint.show_mel_fn,
                 description="useful for when you want to inpaint the mel spectrogram of an audio and predict the missing audio; this tool will generate a mel spectrogram image that you can inpaint, receives audio_path as input, "
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Transcribe Speech", func=self.asr.inference,
description="useful for when you want to know the text corresponding to a human speech, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
                 description="useful for when you want to generate a talking human portrait video given an input audio."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Detect The Sound Event From The Audio", func=self.detection.inference,
                 description="useful for when you want to know what events are in the audio and when each sound event starts or ends; this tool will generate an image of all predicted events, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
                Tool(name="Synthesize Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
                 description="useful for when you want to extract a target sound from a mixture audio; you can describe the target sound by text, receives audio_path and text as input. "
                             "The input to this tool should be a comma-separated string of two, representing mixture audio path and input text."),
Tool(name="Target Sound Detection", func=self.TSD.inference,
                 description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
                             "The input to this tool should be a comma-separated string of two, representing audio path and the text description. ")]
self.agent = initialize_agent(
self.tools,
self.llm,
agent="conversational-react-description",
verbose=True,
memory=self.memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
else:
self.tools = [
Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
                 description="useful for when you want to generate an audio from a user input text and save it to a file."
"The input to this tool should be a string, representing the text used to generate audio."),
Tool(
name="Style Transfer", func= self.tts_ood.inference,
                 description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
                             "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
                             "The input to this tool should be a comma-separated string of two, representing reference audio path and input text."),
Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
                 description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
                             "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
                             "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
                             "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
                             "The input to this tool should be a comma-separated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
                 description="useful for when you want to convert a user input text into speech audio and save it to a file."
"The input to this tool should be a string, representing the text used to be converted to speech."),
Tool(name="Generate Text From The Audio", func=self.a2t.inference,
description="useful for when you want to describe an audio in text, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
                 description="useful for when you want to generate a talking human portrait video given an input audio."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
                 description="useful for when you want to extract a target sound from a mixture audio; you can describe the target sound by text, receives audio_path and text as input. "
                             "The input to this tool should be a comma-separated string of two, representing mixture audio path and input text."),
Tool(name="Target Sound Detection", func=self.TSD.inference,
                 description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
                             "The input to this tool should be a comma-separated string of two, representing audio path and the text description. ")]
self.agent = initialize_agent(
self.tools,
self.llm,
agent="conversational-react-description",
verbose=True,
memory=self.memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
def run_text(self, text, state):
print("===============Running run_text =============")
print("Inputs:", text, state)
print("======>Previous memory:\n %s" % self.agent.memory)
self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
res = self.agent({"input": text})
if res['intermediate_steps'] == []:
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
else:
tool = res['intermediate_steps'][0][0].tool
if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Transcribe Speech":
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Detect The Sound Event From The Audio":
image_filename = res['intermediate_steps'][0][1]
response = res['output'] + f"*{image_filename}*"
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Audio Inpainting":
audio_filename = res['intermediate_steps'][0][0].tool_input
image_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(value=image_filename,visible=True), gr.Button.update(visible=True)
elif tool == "Generate a talking human portrait video given a input Audio":
video_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(value=video_filename,visible=True), gr.Image.update(visible=False), gr.Button.update(visible=False)
print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
audio_filename = res['intermediate_steps'][0][1]
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
def run_image_or_audio(self, file, state, txt):
file_type = file.name[-3:]
if file_type == "wav":
print("===============Running run_audio =============")
print("Inputs:", file, state)
print("======>Previous memory:\n %s" % self.agent.memory)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# audio_load = whisper.load_audio(file.name)
audio_load, sr = soundfile.read(file.name)
            soundfile.write(audio_filename, audio_load, samplerate=sr)
description = self.a2t.inference(audio_filename)
Human_prompt = "\nHuman: provide an audio named {}. The description is: {}. This information helps you to understand this audio, but you should use tools to finish following tasks, " \
"rather than directly imagine from my description. If you understand, say \"Received\". \n".format(audio_filename, description)
AI_prompt = "Received. "
self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
#state = state + [(f"<audio src=audio_filename controls=controls></audio>*{audio_filename}*", AI_prompt)]
state = state + [(f"*{audio_filename}*", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False)
else:
print("===============Running run_image =============")
print("Inputs:", file, state)
print("======>Previous memory:\n %s" % self.agent.memory)
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
print("======>Auto Resize Image...")
img = Image.open(file.name)
width, height = img.size
ratio = min(512 / width, 512 / height)
width_new, height_new = (round(width * ratio), round(height * ratio))
img = img.resize((width_new, height_new))
img = img.convert('RGB')
img.save(image_filename, "PNG")
print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
description = self.i2t.inference(image_filename)
Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
"rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
AI_prompt = "Received. "
self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
state = state + [(f"*{image_filename}*", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False)
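    # speech: transcribe the microphone recording with self.asr, run the agent on the text,
    # and speak the textual reply back via self.tts; the tool-specific branches mirror
    # run_text, and the final fallback merges a spoken notice with the generated audio file.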
def speech(self, speech_input, state):
input_audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
text = self.asr.translate_english(speech_input)
print("Inputs:", text, state)
print("======>Previous memory:\n %s" % self.agent.memory)
self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
res = self.agent({"input": text})
if res['intermediate_steps'] == []:
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
output_audio_filename = self.tts.inference(response)
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
else:
tool = res['intermediate_steps'][0][0].tool
if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
output_audio_filename = self.tts.inference(res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Transcribe Speech":
print("======>Current memory:\n %s" % self.agent.memory)
output_audio_filename = self.tts.inference(res['output'])
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Detect The Sound Event From The Audio":
print("======>Current memory:\n %s" % self.agent.memory)
image_filename = res['intermediate_steps'][0][1]
output_audio_filename = self.tts.inference(res['output'])
response = res['output'] + f"*{image_filename}*"
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Generate a talking human portrait video given a input Audio":
video_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
output_audio_filename = self.tts.inference(res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(value=video_filename,visible=True)
print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub(r'(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
audio_filename = res['intermediate_steps'][0][1]
Res = "The audio file has been generated and the audio is "
output_audio_filename = merge_audio(self.tts.inference(Res), audio_filename)
print(output_audio_filename)
state = state + [(text, response)]
response = res['output']
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
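    # inpainting: triggered by the "Predict Masked Place" button; runs the audio-inpainting
    # model on the sketched mel spectrogram and the current audio, records an AI reply in the
    # agent memory, and displays the regenerated audio.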
def inpainting(self, state, audio_filename, image_filename):
print("===============Running inpainting =============")
print("Inputs:", state)
print("======>Previous memory:\n %s" % self.agent.memory)
new_image_filename, new_audio_filename = self.inpaint.inference(audio_filename, image_filename)
AI_prompt = "Here are the predict audio and the mel spectrum." + f"*{new_audio_filename}*" + f"*{new_image_filename}*"
output_audio_filename = self.tts.inference(AI_prompt)
self.agent.memory.buffer = self.agent.memory.buffer + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
state = state + [(f"Audio Inpainting", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Image.update(visible=False), gr.Audio.update(value=new_audio_filename, visible=True), gr.Video.update(visible=False), gr.Button.update(visible=False)
def clear_audio(self):
return gr.Audio.update(value=None, visible=False)
def clear_input_audio(self):
return gr.Audio.update(value=None)
def clear_image(self):
return gr.Image.update(value=None, visible=False)
def clear_video(self):
return gr.Video.update(value=None, visible=False)
def clear_button(self):
return gr.Button.update(visible=False)
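# Gradio front-end: a Radio button chooses between text and speech interaction, the chat panel
# shows the dialogue, and the hidden audio/video/image widgets are revealed by the callbacks
# above whenever a tool produces the corresponding media.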
if __name__ == '__main__':
bot = ConversationBot()
with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
with gr.Row():
gr.Markdown("## AudioGPT")
chatbot = gr.Chatbot(elem_id="chatbot", label="AudioGPT", visible=False)
state = gr.State([])
with gr.Row() as select_raws:
with gr.Column(scale=0.7):
interaction_type = gr.Radio(choices=['text', 'speech'], value='text', label='Interaction Type')
with gr.Column(scale=0.3, min_width=0):
select = gr.Button("Select")
with gr.Row(visible=False) as text_input_raws:
with gr.Column(scale=0.7):
txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
with gr.Column(scale=0.1, min_width=0):
run = gr.Button("🏃♂️Run")
with gr.Column(scale=0.1, min_width=0):
clear_txt = gr.Button("🔄Clear️")
with gr.Column(scale=0.1, min_width=0):
btn = gr.UploadButton("🖼️Upload", file_types=["image","audio"])
with gr.Row():
outaudio = gr.Audio(visible=False)
with gr.Row():
with gr.Column(scale=0.3, min_width=0):
outvideo = gr.Video(visible=False)
with gr.Row():
show_mel = gr.Image(type="filepath",tool='sketch',visible=False)
with gr.Row():
run_button = gr.Button("Predict Masked Place",visible=False)
with gr.Row(visible=False) as speech_input_raws:
with gr.Column(scale=0.7):
speech_input = gr.Audio(source="microphone", type="filepath", label="Input")
with gr.Column(scale=0.15, min_width=0):
submit_btn = gr.Button("🏃♂️Submit")
with gr.Column(scale=0.15, min_width=0):
clear_speech = gr.Button("🔄Clear️")
with gr.Row():
speech_output = gr.Audio(label="Output",visible=False)
select.click(bot.init_tools, [interaction_type], [chatbot, select_raws, text_input_raws, speech_input_raws])
txt.submit(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
txt.submit(lambda: "", None, txt)
run.click(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
run.click(lambda: "", None, txt)
btn.upload(bot.run_image_or_audio, [btn, state, txt], [chatbot, state, outaudio, outvideo])
run_button.click(bot.inpainting, [state, outaudio, show_mel], [chatbot, state, show_mel, outaudio, outvideo, run_button])
clear_txt.click(bot.memory.clear)
clear_txt.click(lambda: [], None, chatbot)
clear_txt.click(lambda: [], None, state)
clear_txt.click(lambda:None, None, txt)
clear_txt.click(bot.clear_button, None, run_button)
clear_txt.click(bot.clear_image, None, show_mel)
clear_txt.click(bot.clear_audio, None, outaudio)
clear_txt.click(bot.clear_video, None, outvideo)
submit_btn.click(bot.speech, [speech_input, state], [speech_input, speech_output, state, outvideo])
clear_speech.click(bot.clear_input_audio, None, speech_input)
clear_speech.click(bot.clear_audio, None, speech_output)
clear_speech.click(lambda: [], None, state)
clear_speech.click(bot.clear_video, None, outvideo)
demo.launch(server_name="0.0.0.0", server_port=7860, share=True) | EXA-1-master | exa/models/AudioGPT/audio-chatgpt.py |
from data_gen.tts.base_preprocess import BasePreprocessor
class LJPreprocess(BasePreprocessor):
def meta_data(self):
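        # LJSpeech-style metadata.csv: each line reads `<item_name>|<raw text>|<normalized text>`;
        # the normalized text (third field) is used as the transcript here.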
for l in open(f'{self.raw_data_dir}/metadata.csv').readlines():
item_name, _, txt = l.strip().split("|")
wav_fn = f"{self.raw_data_dir}/wavs/{item_name}.wav"
yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt}
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/egs/datasets/audio/lj/preprocess.py |
import os
from data_gen.tts.base_preprocess import BasePreprocessor
import glob
class LibrittsPreAlign(BasePreprocessor):
def meta_data(self):
wav_fns = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*.wav'))
for wav_fn in wav_fns:
item_name = os.path.basename(wav_fn)[:-4]
txt_fn = f'{wav_fn[:-4]}.normalized.txt'
            with open(txt_fn, 'r') as f:
                txt = f.readlines()
spk = item_name.split("_")[0]
# Example:
#
# 'item_name': '103_1241_000000_000001'
# 'wav_fn': 'LibriTTS/train-clean-100/103/1241/103_1241_000000_000001.wav'
# 'txt': 'matthew Cuthbert is surprised'
# 'spk_name': '103'
yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt[0], 'spk_name': spk}
if __name__ == "__main__":
LibrittsPreAlign().process()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/egs/datasets/audio/libritts/pre_align.py |
import os
from data_gen.tts.base_pre_align import BasePreAlign
import glob
class VCTKPreAlign(BasePreAlign):
def meta_data(self):
wav_fns = glob.glob(f'{self.raw_data_dir}/wav48/*/*.wav')
for wav_fn in wav_fns:
item_name = os.path.basename(wav_fn)[:-4]
spk = item_name.split("_")[0]
txt_fn = wav_fn.split("/")
txt_fn[-1] = f'{item_name}.txt'
txt_fn[-3] = f'txt'
txt_fn = "/".join(txt_fn)
if os.path.exists(txt_fn) and os.path.exists(wav_fn):
yield item_name, wav_fn, (self.load_txt, txt_fn), spk
if __name__ == "__main__":
VCTKPreAlign().process()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/egs/datasets/audio/vctk/pre_align.py |
import os
from data_gen.tts.base_preprocess import BasePreprocessor
import glob
import re
class EmoPreAlign(BasePreprocessor):
def meta_data(self):
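        # Assumes an ESD-style layout: <raw_data_dir>/<spk>/<spk>.txt lists
        # `<item_name> <transcript> <emotion> ...` per line, and the matching wav lives at
        # <raw_data_dir>/<spk>/<emotion>/<item_name>.wav.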
spks = ['0012', '0011', '0013', '0014', '0015', '0016', '0017', '0018', '0019', '0020']
pattern = re.compile('[\t\n ]+')
for spk in spks:
for line in open(f"{self.raw_data_dir}/{spk}/{spk}.txt", 'r'): # 打开文件
line = re.sub(pattern, ' ', line)
if line == ' ': continue
split_ = line.split(' ')
txt = ' '.join(split_[1: -2])
item_name = split_[0]
emotion = split_[-2]
wav_fn = f'{self.raw_data_dir}/{spk}/{emotion}/{item_name}.wav'
yield item_name, wav_fn, txt, spk, emotion
if __name__ == "__main__":
EmoPreAlign().process()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/egs/datasets/audio/emotion/pre_align.py |
import importlib
from utils.hparams import set_hparams, hparams
def run_task():
assert hparams['task_cls'] != ''
pkg = ".".join(hparams["task_cls"].split(".")[:-1])
cls_name = hparams["task_cls"].split(".")[-1]
task_cls = getattr(importlib.import_module(pkg), cls_name)
task_cls.start()
if __name__ == '__main__':
set_hparams()
run_task()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/run.py |
import glob
import re
import subprocess
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
from utils.hparams import hparams, set_hparams
import random
import sys
import numpy as np
import torch.distributed as dist
from pytorch_lightning.loggers import TensorBoardLogger
from utils.pl_utils import LatestModelCheckpoint, BaseTrainer, data_loader, DDP
from torch import nn
import torch.utils.data
import utils
import logging
import os
torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, shuffle):
super().__init__()
self.hparams = hparams
self.shuffle = shuffle
self.sort_by_len = hparams['sort_by_len']
self.sizes = None
@property
def _sizes(self):
return self.sizes
def __getitem__(self, index):
raise NotImplementedError
def collater(self, samples):
raise NotImplementedError
def __len__(self):
return len(self._sizes)
def num_tokens(self, index):
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
size = min(self._sizes[index], hparams['max_frames'])
return size
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self))
if self.sort_by_len:
indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')]
                    # shuffle first, then stable-sort by length, so samples of equal length keep the order given by the random permutation (i.e. they stay randomly shuffled)
else:
indices = np.arange(len(self))
return indices
@property
def num_workers(self):
return int(os.getenv('NUM_WORKERS', hparams['ds_workers']))
class BaseTask(nn.Module):
def __init__(self, *args, **kwargs):
# dataset configs
super(BaseTask, self).__init__(*args, **kwargs)
self.current_epoch = 0
self.global_step = 0
self.loaded_optimizer_states_dict = {}
self.trainer = None
self.logger = None
self.on_gpu = False
self.use_dp = False
self.use_ddp = False
self.example_input_array = None
self.max_tokens = hparams['max_tokens']
self.max_sentences = hparams['max_sentences']
self.max_eval_tokens = hparams['max_eval_tokens']
if self.max_eval_tokens == -1:
hparams['max_eval_tokens'] = self.max_eval_tokens = self.max_tokens
self.max_eval_sentences = hparams['max_eval_sentences']
if self.max_eval_sentences == -1:
hparams['max_eval_sentences'] = self.max_eval_sentences = self.max_sentences
self.model = None
self.training_losses_meter = None
###########
# Training, validation and testing
###########
def build_model(self):
raise NotImplementedError
def load_ckpt(self, ckpt_base_dir, current_model_name=None, model_name='model', force=True, strict=True):
# This function is updated on 2021.12.13
if current_model_name is None:
current_model_name = model_name
utils.load_ckpt(self.__getattr__(current_model_name), ckpt_base_dir, current_model_name, force, strict)
def on_epoch_start(self):
self.training_losses_meter = {'total_loss': utils.AvgrageMeter()}
def _training_step(self, sample, batch_idx, optimizer_idx):
"""
:param sample:
:param batch_idx:
:return: total loss: torch.Tensor, loss_log: dict
"""
raise NotImplementedError
def training_step(self, sample, batch_idx, optimizer_idx=-1):
loss_ret = self._training_step(sample, batch_idx, optimizer_idx)
self.opt_idx = optimizer_idx
if loss_ret is None:
return {'loss': None}
total_loss, log_outputs = loss_ret
log_outputs = utils.tensors_to_scalars(log_outputs)
for k, v in log_outputs.items():
if k not in self.training_losses_meter:
self.training_losses_meter[k] = utils.AvgrageMeter()
if not np.isnan(v):
self.training_losses_meter[k].update(v)
self.training_losses_meter['total_loss'].update(total_loss.item())
try:
log_outputs['lr'] = self.scheduler.get_lr()
if isinstance(log_outputs['lr'], list):
log_outputs['lr'] = log_outputs['lr'][0]
except:
pass
# log_outputs['all_loss'] = total_loss.item()
progress_bar_log = log_outputs
tb_log = {f'tr/{k}': v for k, v in log_outputs.items()}
return {
'loss': total_loss,
'progress_bar': progress_bar_log,
'log': tb_log
}
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.step()
optimizer.zero_grad()
if self.scheduler is not None:
self.scheduler.step(self.global_step // hparams['accumulate_grad_batches'])
def on_epoch_end(self):
loss_outputs = {k: round(v.avg, 4) for k, v in self.training_losses_meter.items()}
print(f"\n==============\n "
f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. {loss_outputs}"
f"\n==============\n")
def validation_step(self, sample, batch_idx):
"""
:param sample:
:param batch_idx:
:return: output: dict
"""
raise NotImplementedError
def _validation_end(self, outputs):
"""
:param outputs:
:return: loss_output: dict
"""
raise NotImplementedError
def validation_end(self, outputs):
loss_output = self._validation_end(outputs)
print(f"\n==============\n "
f"valid results: {loss_output}"
f"\n==============\n")
return {
'log': {f'val/{k}': v for k, v in loss_output.items()},
'val_loss': loss_output['total_loss']
}
def build_scheduler(self, optimizer):
raise NotImplementedError
def build_optimizer(self, model):
raise NotImplementedError
def configure_optimizers(self):
optm = self.build_optimizer(self.model)
self.scheduler = self.build_scheduler(optm)
return [optm]
def test_start(self):
pass
def test_step(self, sample, batch_idx):
return self.validation_step(sample, batch_idx)
def test_end(self, outputs):
return self.validation_end(outputs)
###########
# Running configuration
###########
@classmethod
def start(cls):
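        # Entry point used by tasks/run.py: parse hparams, seed the RNGs, build the trainer
        # with checkpointing and TensorBoard logging, snapshot the code dirs listed in
        # hparams['save_codes'] before training, then run fit() (or test() when inferring).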
set_hparams()
os.environ['MASTER_PORT'] = str(random.randint(15000, 30000))
random.seed(hparams['seed'])
np.random.seed(hparams['seed'])
task = cls()
work_dir = hparams['work_dir']
trainer = BaseTrainer(checkpoint_callback=LatestModelCheckpoint(
filepath=work_dir,
verbose=True,
monitor='val_loss',
mode='min',
num_ckpt_keep=hparams['num_ckpt_keep'],
save_best=hparams['save_best'],
period=1 if hparams['save_ckpt'] else 100000
),
logger=TensorBoardLogger(
save_dir=work_dir,
name='lightning_logs',
version='lastest'
),
gradient_clip_val=hparams['clip_grad_norm'],
val_check_interval=hparams['val_check_interval'],
row_log_interval=hparams['log_interval'],
max_updates=hparams['max_updates'],
num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams[
'validate'] else 10000,
accumulate_grad_batches=hparams['accumulate_grad_batches'])
if not hparams['infer']: # train
t = datetime.now().strftime('%Y%m%d%H%M%S')
code_dir = f'{work_dir}/codes/{t}'
subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True)
for c in hparams['save_codes']:
subprocess.check_call(f'cp -r "{c}" "{code_dir}/"', shell=True)
print(f"| Copied codes to {code_dir}.")
trainer.checkpoint_callback.task = task
trainer.fit(task)
else:
trainer.test(task)
def configure_ddp(self, model, device_ids):
model = DDP(
model,
device_ids=device_ids,
find_unused_parameters=True
)
if dist.get_rank() != 0 and not hparams['debug']:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
random.seed(hparams['seed'])
np.random.seed(hparams['seed'])
return model
def training_end(self, *args, **kwargs):
return None
def init_ddp_connection(self, proc_rank, world_size):
set_hparams(print_hparams=False)
# guarantees unique ports across jobs from same grid search
default_port = 12910
# if user gave a port number, use that one instead
try:
default_port = os.environ['MASTER_PORT']
except Exception:
os.environ['MASTER_PORT'] = str(default_port)
# figure out the root node addr
root_node = '127.0.0.2'
root_node = self.trainer.resolve_root_node_address(root_node)
os.environ['MASTER_ADDR'] = root_node
dist.init_process_group('nccl', rank=proc_rank, world_size=world_size)
@data_loader
def train_dataloader(self):
return None
@data_loader
def test_dataloader(self):
return None
@data_loader
def val_dataloader(self):
return None
def on_load_checkpoint(self, checkpoint):
pass
def on_save_checkpoint(self, checkpoint):
pass
def on_sanity_check_start(self):
pass
def on_train_start(self):
pass
def on_train_end(self):
pass
def on_batch_start(self, batch):
pass
def on_batch_end(self):
pass
def on_pre_performance_check(self):
pass
def on_post_performance_check(self):
pass
def on_before_zero_grad(self, optimizer):
pass
def on_after_backward(self):
pass
def backward(self, loss, optimizer):
loss.backward()
def grad_norm(self, norm_type):
results = {}
total_norm = 0
for name, p in self.named_parameters():
if p.requires_grad:
try:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm ** norm_type
norm = param_norm ** (1 / norm_type)
grad = round(norm.data.cpu().numpy().flatten()[0], 3)
results['grad_{}_norm_{}'.format(norm_type, name)] = grad
except Exception:
# this param had no grad
pass
total_norm = total_norm ** (1. / norm_type)
grad = round(total_norm.data.cpu().numpy().flatten()[0], 3)
results['grad_{}_norm_total'.format(norm_type)] = grad
return results
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/base_task.py |
import torch
import utils
from modules.diff.diffusion import GaussianDiffusion
from modules.diff.net import DiffNet
from tasks.tts.fs2 import FastSpeech2Task
from utils.hparams import hparams
DIFF_DECODERS = {
'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),
}
class DiffFsTask(FastSpeech2Task):
def build_tts_model(self):
mel_bins = hparams['audio_num_mel_bins']
self.model = GaussianDiffusion(
phone_encoder=self.phone_encoder,
out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
timesteps=hparams['timesteps'],
loss_type=hparams['diff_loss_type'],
spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
)
def run_model(self, model, sample, return_output=False, infer=False):
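        # Compute the diffusion mel loss plus the auxiliary duration / pitch / energy losses;
        # with pitch_type == 'cwt' the frame-level f0 is first reconstructed from the CWT spectrum.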
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
mel2ph = sample['mel2ph'] # [B, T_s]
f0 = sample['f0']
uv = sample['uv']
energy = sample['energy']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
if hparams['pitch_type'] == 'cwt':
cwt_spec = sample[f'cwt_spec']
f0_mean = sample['f0_mean']
f0_std = sample['f0_std']
sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer)
losses = {}
if 'diff_loss' in output:
losses['mel'] = output['diff_loss']
self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
if hparams['use_pitch_embed']:
self.add_pitch_loss(output, sample, losses)
if hparams['use_energy_embed']:
self.add_energy_loss(output['energy_pred'], energy, losses)
if not return_output:
return losses
else:
return losses, output
def _training_step(self, sample, batch_idx, _):
log_outputs = self.run_model(self.model, sample)
total_loss = sum([v for v in log_outputs.values() if isinstance(v, torch.Tensor) and v.requires_grad])
log_outputs['batch_size'] = sample['txt_tokens'].size()[0]
log_outputs['lr'] = self.scheduler.get_lr()[0]
return total_loss, log_outputs
def validation_step(self, sample, batch_idx):
outputs = {}
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
outputs = utils.tensors_to_scalars(outputs)
if batch_idx < hparams['num_valid_plots']:
_, model_out = self.run_model(self.model, sample, return_output=True, infer=True)
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'])
return outputs
def build_scheduler(self, optimizer):
return torch.optim.lr_scheduler.StepLR(optimizer, hparams['decay_steps'], gamma=0.5)
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx):
if optimizer is None:
return
optimizer.step()
optimizer.zero_grad()
if self.scheduler is not None:
self.scheduler.step(self.global_step // hparams['accumulate_grad_batches'])
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/svs/task.py |
EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/svs/__init__.py |
|
import torch
import utils
from utils.hparams import hparams
from modules.diff.net import DiffNet
from modules.diff.shallow_diffusion_tts import GaussianDiffusion
from tasks.svs.task import DiffFsTask
from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
from utils.pitch_utils import denorm_f0
from tasks.tts.fs2_utils import FastSpeechDataset
DIFF_DECODERS = {
'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),
}
class DiffSpeechTask(DiffFsTask):
def __init__(self):
super(DiffSpeechTask, self).__init__()
self.dataset_cls = FastSpeechDataset
self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
def build_tts_model(self):
mel_bins = hparams['audio_num_mel_bins']
self.model = GaussianDiffusion(
phone_encoder=self.phone_encoder,
out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
timesteps=hparams['timesteps'],
K_step=hparams['K_step'],
loss_type=hparams['diff_loss_type'],
spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
)
if hparams['fs2_ckpt'] != '':
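            # Warm-start from a pretrained FastSpeech2 checkpoint and freeze everything except
            # its variance predictors, so training focuses on the diffusion decoder.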
utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
# self.model.fs2.decoder = None
for k, v in self.model.fs2.named_parameters():
                if 'predictor' not in k:
v.requires_grad = False
def build_optimizer(self, model):
self.optimizer = optimizer = torch.optim.AdamW(
filter(lambda p: p.requires_grad, model.parameters()),
lr=hparams['lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
return optimizer
def run_model(self, model, sample, return_output=False, infer=False):
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
# mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s]
mel2ph = sample['mel2ph']
f0 = sample['f0']
uv = sample['uv']
energy = sample['energy']
# fs2_mel = sample['fs2_mels']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
if hparams['pitch_type'] == 'cwt':
cwt_spec = sample[f'cwt_spec']
f0_mean = sample['f0_mean']
f0_std = sample['f0_std']
sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer)
losses = {}
if 'diff_loss' in output:
losses['mel'] = output['diff_loss']
self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
if hparams['use_pitch_embed']:
self.add_pitch_loss(output, sample, losses)
if hparams['use_energy_embed']:
self.add_energy_loss(output['energy_pred'], energy, losses)
if not return_output:
return losses
else:
return losses, output
def validation_step(self, sample, batch_idx):
outputs = {}
txt_tokens = sample['txt_tokens'] # [B, T_t]
energy = sample['energy']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
mel2ph = sample['mel2ph']
f0 = sample['f0']
uv = sample['uv']
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
outputs = utils.tensors_to_scalars(outputs)
if batch_idx < hparams['num_valid_plots']:
# model_out = self.model(
# txt_tokens, spk_embed=spk_embed, mel2ph=None, f0=None, uv=None, energy=None, ref_mels=None, infer=True)
# self.plot_mel(batch_idx, model_out['mel_out'], model_out['fs2_mel'], name=f'diffspeech_vs_fs2_{batch_idx}')
model_out = self.model(
txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True)
gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=model_out.get('f0_denorm'))
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'])
return outputs
############
# validation plots
############
def plot_wav(self, batch_idx, gt_wav, wav_out, is_mel=False, gt_f0=None, f0=None, name=None):
gt_wav = gt_wav[0].cpu().numpy()
wav_out = wav_out[0].cpu().numpy()
gt_f0 = gt_f0[0].cpu().numpy()
f0 = f0[0].cpu().numpy()
if is_mel:
gt_wav = self.vocoder.spec2wav(gt_wav, f0=gt_f0)
wav_out = self.vocoder.spec2wav(wav_out, f0=f0)
self.logger.experiment.add_audio(f'gt_{batch_idx}', gt_wav, sample_rate=hparams['audio_sample_rate'], global_step=self.global_step)
self.logger.experiment.add_audio(f'wav_{batch_idx}', wav_out, sample_rate=hparams['audio_sample_rate'], global_step=self.global_step)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/svs/diffspeech_task.py |
import torch
import utils
from utils.hparams import hparams
from modules.diff.net import DiffNet
from modules.diff.shallow_diffusion_tts import GaussianDiffusion, OfflineGaussianDiffusion
from tasks.svs.diffspeech_task import DiffSpeechTask
from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
from modules.fastspeech.pe import PitchExtractor
from modules.fastspeech.fs2 import FastSpeech2
from modules.diffsinger_midi.fs2 import FastSpeech2MIDI
from modules.fastspeech.tts_modules import mel2ph_to_dur
from modules.diff.candidate_decoder import FFT
from utils.pitch_utils import denorm_f0
from tasks.tts.fs2_utils import FastSpeechDataset
from tasks.tts.fs2 import FastSpeech2Task
import numpy as np
import os
import torch.nn.functional as F
DIFF_DECODERS = {
'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']),
'fft': lambda hp: FFT(
hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
}
class DiffSingerTask(DiffSpeechTask):
def __init__(self):
super(DiffSingerTask, self).__init__()
self.dataset_cls = FastSpeechDataset
self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
self.pe = PitchExtractor().cuda()
utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
self.pe.eval()
def build_tts_model(self):
# import torch
# from tqdm import tqdm
# v_min = torch.ones([80]) * 100
# v_max = torch.ones([80]) * -100
# for i, ds in enumerate(tqdm(self.dataset_cls('train'))):
# v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max)
# v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min)
# if i % 100 == 0:
# print(i, v_min, v_max)
# print('final', v_min, v_max)
mel_bins = hparams['audio_num_mel_bins']
self.model = GaussianDiffusion(
phone_encoder=self.phone_encoder,
out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
timesteps=hparams['timesteps'],
K_step=hparams['K_step'],
loss_type=hparams['diff_loss_type'],
spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
)
if hparams['fs2_ckpt'] != '':
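            # Shallow-diffusion setup: the whole FastSpeech2 backbone is loaded from fs2_ckpt
            # and frozen, so only the diffusion decoder receives gradients.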
utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
# self.model.fs2.decoder = None
for k, v in self.model.fs2.named_parameters():
v.requires_grad = False
def validation_step(self, sample, batch_idx):
outputs = {}
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
energy = sample['energy']
# fs2_mel = sample['fs2_mels']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
mel2ph = sample['mel2ph']
f0 = sample['f0']
uv = sample['uv']
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
outputs = utils.tensors_to_scalars(outputs)
if batch_idx < hparams['num_valid_plots']:
model_out = self.model(
txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True)
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel
else:
gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
pred_f0 = model_out.get('f0_denorm')
self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')
return outputs
class ShallowDiffusionOfflineDataset(FastSpeechDataset):
def __getitem__(self, index):
sample = super(ShallowDiffusionOfflineDataset, self).__getitem__(index)
item = self._get_item(index)
if self.prefix != 'train' and hparams['fs2_ckpt'] != '':
fs2_ckpt = os.path.dirname(hparams['fs2_ckpt'])
item_name = item['item_name']
fs2_mel = torch.Tensor(np.load(f'{fs2_ckpt}/P_mels_npy/{item_name}.npy')) # ~M generated by FFT-singer.
sample['fs2_mel'] = fs2_mel
return sample
def collater(self, samples):
batch = super(ShallowDiffusionOfflineDataset, self).collater(samples)
if self.prefix != 'train' and hparams['fs2_ckpt'] != '':
batch['fs2_mels'] = utils.collate_2d([s['fs2_mel'] for s in samples], 0.0)
return batch
class DiffSingerOfflineTask(DiffSingerTask):
def __init__(self):
super(DiffSingerOfflineTask, self).__init__()
self.dataset_cls = ShallowDiffusionOfflineDataset
def build_tts_model(self):
mel_bins = hparams['audio_num_mel_bins']
self.model = OfflineGaussianDiffusion(
phone_encoder=self.phone_encoder,
out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
timesteps=hparams['timesteps'],
K_step=hparams['K_step'],
loss_type=hparams['diff_loss_type'],
spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
)
# if hparams['fs2_ckpt'] != '':
# utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
# self.model.fs2.decoder = None
def run_model(self, model, sample, return_output=False, infer=False):
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
mel2ph = sample['mel2ph'] # [B, T_s]
f0 = sample['f0']
uv = sample['uv']
energy = sample['energy']
fs2_mel = None #sample['fs2_mels']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
if hparams['pitch_type'] == 'cwt':
cwt_spec = sample[f'cwt_spec']
f0_mean = sample['f0_mean']
f0_std = sample['f0_std']
sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
ref_mels=[target, fs2_mel], f0=f0, uv=uv, energy=energy, infer=infer)
losses = {}
if 'diff_loss' in output:
losses['mel'] = output['diff_loss']
# self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
# if hparams['use_pitch_embed']:
# self.add_pitch_loss(output, sample, losses)
if hparams['use_energy_embed']:
self.add_energy_loss(output['energy_pred'], energy, losses)
if not return_output:
return losses
else:
return losses, output
def validation_step(self, sample, batch_idx):
outputs = {}
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
energy = sample['energy']
# fs2_mel = sample['fs2_mels']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
mel2ph = sample['mel2ph']
f0 = sample['f0']
uv = sample['uv']
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
outputs = utils.tensors_to_scalars(outputs)
if batch_idx < hparams['num_valid_plots']:
fs2_mel = sample['fs2_mels']
model_out = self.model(
txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy,
ref_mels=[None, fs2_mel], infer=True)
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel
else:
gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
pred_f0 = model_out.get('f0_denorm')
self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
self.plot_mel(batch_idx, sample['mels'], fs2_mel, name=f'fs2mel_{batch_idx}')
return outputs
def test_step(self, sample, batch_idx):
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
txt_tokens = sample['txt_tokens']
energy = sample['energy']
if hparams['profile_infer']:
pass
else:
mel2ph, uv, f0 = None, None, None
if hparams['use_gt_dur']:
mel2ph = sample['mel2ph']
if hparams['use_gt_f0']:
f0 = sample['f0']
uv = sample['uv']
fs2_mel = sample['fs2_mels']
outputs = self.model(
txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=[None, fs2_mel], energy=energy,
infer=True)
sample['outputs'] = self.model.out2mel(outputs['mel_out'])
sample['mel2ph_pred'] = outputs['mel2ph']
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel
else:
sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams)
sample['f0_pred'] = outputs.get('f0_denorm')
return self.after_infer(sample)
class MIDIDataset(FastSpeechDataset):
def __getitem__(self, index):
sample = super(MIDIDataset, self).__getitem__(index)
item = self._get_item(index)
sample['f0_midi'] = torch.FloatTensor(item['f0_midi'])
sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']]
return sample
def collater(self, samples):
batch = super(MIDIDataset, self).collater(samples)
batch['f0_midi'] = utils.collate_1d([s['f0_midi'] for s in samples], 0.0)
batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0)
# print((batch['pitch_midi'] == f0_to_coarse(batch['f0_midi'])).all())
return batch
class OpencpopDataset(FastSpeechDataset):
def __getitem__(self, index):
sample = super(OpencpopDataset, self).__getitem__(index)
item = self._get_item(index)
sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']]
sample['midi_dur'] = torch.FloatTensor(item['midi_dur'])[:hparams['max_frames']]
sample['is_slur'] = torch.LongTensor(item['is_slur'])[:hparams['max_frames']]
sample['word_boundary'] = torch.LongTensor(item['word_boundary'])[:hparams['max_frames']]
return sample
def collater(self, samples):
batch = super(OpencpopDataset, self).collater(samples)
batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0)
batch['midi_dur'] = utils.collate_1d([s['midi_dur'] for s in samples], 0)
batch['is_slur'] = utils.collate_1d([s['is_slur'] for s in samples], 0)
batch['word_boundary'] = utils.collate_1d([s['word_boundary'] for s in samples], 0)
return batch
class DiffSingerMIDITask(DiffSingerTask):
def __init__(self):
super(DiffSingerMIDITask, self).__init__()
# self.dataset_cls = MIDIDataset
self.dataset_cls = OpencpopDataset
def run_model(self, model, sample, return_output=False, infer=False):
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
# mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s]
mel2ph = sample['mel2ph']
if hparams.get('switch_midi2f0_step') is not None and self.global_step > hparams['switch_midi2f0_step']:
f0 = None
uv = None
else:
f0 = sample['f0']
uv = sample['uv']
energy = sample['energy']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
if hparams['pitch_type'] == 'cwt':
cwt_spec = sample[f'cwt_spec']
f0_mean = sample['f0_mean']
f0_std = sample['f0_std']
sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=sample['pitch_midi'],
midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
losses = {}
if 'diff_loss' in output:
losses['mel'] = output['diff_loss']
self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses)
if hparams['use_pitch_embed']:
self.add_pitch_loss(output, sample, losses)
if hparams['use_energy_embed']:
self.add_energy_loss(output['energy_pred'], energy, losses)
if not return_output:
return losses
else:
return losses, output
def validation_step(self, sample, batch_idx):
outputs = {}
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
energy = sample['energy']
# fs2_mel = sample['fs2_mels']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
mel2ph = sample['mel2ph']
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
outputs = utils.tensors_to_scalars(outputs)
if batch_idx < hparams['num_valid_plots']:
model_out = self.model(
txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=None, uv=None, energy=energy, ref_mels=None, infer=True,
pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel
else:
gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
pred_f0 = model_out.get('f0_denorm')
self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')
if hparams['use_pitch_embed']:
self.plot_pitch(batch_idx, sample, model_out)
return outputs
def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None):
"""
:param dur_pred: [B, T], float, log scale
:param mel2ph: [B, T]
:param txt_tokens: [B, T]
:param losses:
:return:
"""
B, T = txt_tokens.shape
nonpadding = (txt_tokens != 0).float()
dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding
is_sil = torch.zeros_like(txt_tokens).bool()
for p in self.sil_ph:
is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0])
is_sil = is_sil.float() # [B, T_txt]
# phone duration loss
if hparams['dur_loss'] == 'mse':
losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none')
losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
dur_pred = (dur_pred.exp() - 1).clamp(min=0)
else:
raise NotImplementedError
# use linear scale for sent and word duration
if hparams['lambda_word_dur'] > 0:
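            # Word-level duration loss: wdb marks word boundaries, its cumulative sum gives a
            # word index per phone, and scatter_add pools phone durations into word durations.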
idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1]
# word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur
word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred)
word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt)
wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
word_nonpadding = (word_dur_g > 0).float()
wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
if hparams['lambda_sent_dur'] > 0:
sent_dur_p = dur_pred.sum(-1)
sent_dur_g = dur_gt.sum(-1)
sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
class AuxDecoderMIDITask(FastSpeech2Task):
def __init__(self):
super().__init__()
# self.dataset_cls = MIDIDataset
self.dataset_cls = OpencpopDataset
def build_tts_model(self):
if hparams.get('use_midi') is not None and hparams['use_midi']:
self.model = FastSpeech2MIDI(self.phone_encoder)
else:
self.model = FastSpeech2(self.phone_encoder)
def run_model(self, model, sample, return_output=False):
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
mel2ph = sample['mel2ph'] # [B, T_s]
f0 = sample['f0']
uv = sample['uv']
energy = sample['energy']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
if hparams['pitch_type'] == 'cwt':
cwt_spec = sample[f'cwt_spec']
f0_mean = sample['f0_mean']
f0_std = sample['f0_std']
sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False, pitch_midi=sample['pitch_midi'],
midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
losses = {}
self.add_mel_loss(output['mel_out'], target, losses)
self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses)
if hparams['use_pitch_embed']:
self.add_pitch_loss(output, sample, losses)
if hparams['use_energy_embed']:
self.add_energy_loss(output['energy_pred'], energy, losses)
if not return_output:
return losses
else:
return losses, output
def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None):
"""
:param dur_pred: [B, T], float, log scale
:param mel2ph: [B, T]
:param txt_tokens: [B, T]
:param losses:
:return:
"""
B, T = txt_tokens.shape
nonpadding = (txt_tokens != 0).float()
dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding
is_sil = torch.zeros_like(txt_tokens).bool()
for p in self.sil_ph:
is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0])
is_sil = is_sil.float() # [B, T_txt]
# phone duration loss
if hparams['dur_loss'] == 'mse':
losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none')
losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
dur_pred = (dur_pred.exp() - 1).clamp(min=0)
else:
raise NotImplementedError
# use linear scale for sent and word duration
if hparams['lambda_word_dur'] > 0:
idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1]
# word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur
word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred)
word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt)
wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
word_nonpadding = (word_dur_g > 0).float()
wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
if hparams['lambda_sent_dur'] > 0:
sent_dur_p = dur_pred.sum(-1)
sent_dur_g = dur_gt.sum(-1)
sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
def validation_step(self, sample, batch_idx):
outputs = {}
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
mel_out = self.model.out2mel(model_out['mel_out'])
outputs = utils.tensors_to_scalars(outputs)
# if sample['mels'].shape[0] == 1:
# self.add_laplace_var(mel_out, sample['mels'], outputs)
if batch_idx < hparams['num_valid_plots']:
self.plot_mel(batch_idx, sample['mels'], mel_out)
self.plot_dur(batch_idx, sample, model_out)
if hparams['use_pitch_embed']:
self.plot_pitch(batch_idx, sample, model_out)
return outputs | EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/svs/diffsinger_task.py |
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from modules.portaspeech.portaspeech import PortaSpeech
from modules.syntaspeech.multi_window_disc import Discriminator
from tasks.tts.fs2 import FastSpeech2Task
from utils.hparams import hparams
from utils.tts_utils import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate, mel2token_to_dur
from utils import num_params, tensors_to_scalars
from utils.pitch_utils import denorm_f0, norm_f0
from data_gen.tts.data_gen_utils import get_pitch
from utils.dtw import dtw as DTW
from utils.plot import spec_to_figure
from utils.text.text_encoder import build_token_encoder
class PortaSpeechAdvTask(FastSpeech2Task):
def __init__(self):
super().__init__()
data_dir = hparams['binary_data_dir']
self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json')
self.build_disc_model()
self.mse_loss_fn = torch.nn.MSELoss()
def build_tts_model(self):
ph_dict_size = len(self.token_encoder)
word_dict_size = len(self.word_encoder)
self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams)
self.gen_params = [p for p in self.model.parameters() if p.requires_grad]
self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)]
self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)]
self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)]
self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ]
self.use_bert = True if len(self.bert_params) > 0 else False
def build_disc_model(self):
disc_win_num = hparams['disc_win_num']
h = hparams['mel_disc_hidden_size']
self.mel_disc = Discriminator(
time_lengths=[32, 64, 128][:disc_win_num],
freq_length=80, hidden_size=h, kernel=(3, 3)
)
self.disc_params = list(self.mel_disc.parameters())
def on_train_start(self):
super().on_train_start()
for n, m in self.model.named_children():
num_params(m, model_name=n)
if hasattr(self.model, 'fvae'):
for n, m in self.model.fvae.named_children():
num_params(m, model_name=f'fvae.{n}')
def _training_step(self, sample, batch_idx, optimizer_idx):
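        # Adversarial training step: optimizer_idx == 0 updates the generator (KL, mel and
        # duration losses, plus LSGAN adversarial terms once disc_start_steps is reached);
        # otherwise the multi-window mel discriminator is updated on real vs. generated mels
        # every disc_interval steps.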
loss_output = {}
loss_weights = {}
disc_start = self.global_step >= hparams["disc_start_steps"] and hparams['lambda_mel_adv'] > 0
if optimizer_idx == 0:
#######################
# Generator #
#######################
loss_output, model_out = self.run_model(sample, infer=False)
self.model_out_gt = self.model_out = \
{k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)}
if disc_start:
mel_p = model_out['mel_out']
if hasattr(self.model, 'out2mel'):
mel_p = self.model.out2mel(mel_p)
o_ = self.mel_disc(mel_p)
p_, pc_ = o_['y'], o_['y_c']
if p_ is not None:
loss_output['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size()))
loss_weights['a'] = hparams['lambda_mel_adv']
if pc_ is not None:
loss_output['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size()))
loss_weights['ac'] = hparams['lambda_mel_adv']
else:
#######################
# Discriminator #
#######################
if disc_start and self.global_step % hparams['disc_interval'] == 0:
model_out = self.model_out_gt
mel_g = sample['mels']
mel_p = model_out['mel_out']
o = self.mel_disc(mel_g)
p, pc = o['y'], o['y_c']
o_ = self.mel_disc(mel_p)
p_, pc_ = o_['y'], o_['y_c']
if p_ is not None:
loss_output["r"] = self.mse_loss_fn(p, p.new_ones(p.size()))
loss_output["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size()))
if pc_ is not None:
loss_output["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size()))
loss_output["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size()))
total_loss = sum([loss_weights.get(k, 1) * v for k, v in loss_output.items() if isinstance(v, torch.Tensor) and v.requires_grad])
loss_output['batch_size'] = sample['txt_tokens'].size()[0]
return total_loss, loss_output
def run_model(self, sample, infer=False, *args, **kwargs):
txt_tokens = sample['txt_tokens']
word_tokens = sample['word_tokens']
spk_embed = sample.get('spk_embed')
spk_id = sample.get('spk_ids')
if not infer:
output = self.model(txt_tokens, word_tokens,
ph2word=sample['ph2word'],
mel2word=sample['mel2word'],
mel2ph=sample['mel2ph'],
word_len=sample['word_lengths'].max(),
tgt_mels=sample['mels'],
pitch=sample.get('pitch'),
spk_embed=spk_embed,
spk_id=spk_id,
infer=False,
global_step=self.global_step,
graph_lst=sample['graph_lst'],
etypes_lst=sample['etypes_lst'],
bert_feats=sample.get("bert_feats"),
cl_feats=sample.get("cl_feats")
)
losses = {}
losses['kl_v'] = output['kl'].detach()
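            # KL annealing: log the raw KL as kl_v, clamp it to kl_min, then scale it linearly
            # from 0 to lambda_kl over the first kl_start_steps updates.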
losses_kl = output['kl']
losses_kl = torch.clamp(losses_kl, min=hparams['kl_min'])
losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl
losses_kl = losses_kl * hparams['lambda_kl']
losses['kl'] = losses_kl
self.add_mel_loss(output['mel_out'], sample['mels'], losses)
if hparams['dur_level'] == 'word':
self.add_dur_loss(
output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses)
self.get_attn_stats(output['attn'], sample, losses)
else:
super(PortaSpeechAdvTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses)
return losses, output
else:
use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
output = self.model(
txt_tokens, word_tokens,
ph2word=sample['ph2word'],
word_len=sample['word_lengths'].max(),
pitch=sample.get('pitch'),
mel2ph=sample['mel2ph'] if use_gt_dur else None,
mel2word=sample['mel2word'] if use_gt_dur else None,
tgt_mels=sample['mels'],
infer=True,
spk_embed=spk_embed,
spk_id=spk_id,
graph_lst=sample['graph_lst'],
etypes_lst=sample['etypes_lst'],
bert_feats=sample.get("bert_feats"),
cl_feats=sample.get("cl_feats")
)
return output
def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None):
T = word_len.max()
dur_gt = mel2token_to_dur(mel2token, T).float()
nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float()
dur_pred = dur_pred * nonpadding
dur_gt = dur_gt * nonpadding
wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none')
wdur = (wdur * nonpadding).sum() / nonpadding.sum()
if hparams['lambda_word_dur'] > 0:
losses['wdur'] = wdur * hparams['lambda_word_dur']
if hparams['lambda_sent_dur'] > 0:
sent_dur_p = dur_pred.sum(-1)
sent_dur_g = dur_gt.sum(-1)
sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean')
losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
with torch.no_grad():
# calculate word-level abs_dur_error in micro-second
            abs_word_dur_error = F.l1_loss(dur_pred, dur_gt, reduction='none')
abs_word_dur_error = (abs_word_dur_error * nonpadding).sum() / nonpadding.sum()
abs_word_dur_error = abs_word_dur_error * hparams['hop_size'] / hparams['audio_sample_rate'] * 1000
losses['abs_word_dur_error'] = abs_word_dur_error
# calculate word-level abs_dur_error in second
sent_dur_p = dur_pred.sum(-1)
sent_dur_g = dur_gt.sum(-1)
abs_sent_dur_error = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean').mean()
abs_sent_dur_error = abs_sent_dur_error * hparams['hop_size'] / hparams['audio_sample_rate']
losses['abs_sent_dur_error'] = abs_sent_dur_error
def validation_step(self, sample, batch_idx):
outputs = {}
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(sample)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
outputs = tensors_to_scalars(outputs)
if self.global_step % hparams['valid_infer_interval'] == 0 \
and batch_idx < hparams['num_valid_plots']:
valid_results = self.save_valid_result(sample, batch_idx, model_out)
wav_gt = valid_results['wav_gt']
mel_gt = valid_results['mel_gt']
wav_pred = valid_results['wav_pred']
mel_pred = valid_results['mel_pred']
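            # Pitch metric: extract f0 from the vocoded GT and predicted audio and report the
            # per-frame DTW alignment cost as f0_dtw.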
f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
manhattan_distance = lambda x, y: np.abs(x - y)
dist, cost, acc, path = DTW(f0_pred_, f0_gt_, manhattan_distance)
outputs['losses']['f0_dtw'] = dist / len(f0_gt_)
return outputs
def save_valid_result(self, sample, batch_idx, model_out):
sr = hparams['audio_sample_rate']
f0_gt = None
mel_out = model_out['mel_out']
if sample.get('f0') is not None:
            f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu(), hparams)
self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt)
# if self.global_step > 0:
wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt)
self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr)
# with gt duration
model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True)
dur_info = self.get_plot_dur_info(sample, model_out)
del dur_info['dur_pred']
wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr)
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_gdur_{batch_idx}',
dur_info=dur_info, f0s=f0_gt)
# with pred duration
if not hparams['use_gt_dur']:
model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False)
dur_info = self.get_plot_dur_info(sample, model_out)
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}',
dur_info=dur_info, f0s=f0_gt)
wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt)
self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr)
# gt wav
mel_gt = sample['mels'][0].cpu()
wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
if self.global_step <= hparams['valid_infer_interval']:
self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr)
# add attn plot
if self.global_step > 0 and hparams['dur_level'] == 'word':
self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step)
return {'wav_gt': wav_gt, 'wav_pred': wav_pred, 'mel_gt': mel_gt, 'mel_pred': model_out['mel_out'][0].cpu()}
def get_attn_stats(self, attn, sample, logging_outputs, prefix=''):
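        """Attention diagnostics for the word-level alignment: focus rate ('fr'),
        phone coverage rate ('pcr') and diagonal focus rate ('dfr'), computed on the
        soft attention with source/target padding masked out."""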
# diagonal_focus_rate
txt_lengths = sample['txt_lengths'].float()
mel_lengths = sample['mel_lengths'].float()
src_padding_mask = sample['txt_tokens'].eq(0)
target_padding_mask = sample['mels'].abs().sum(-1).eq(0)
src_seg_mask = sample['txt_tokens'].eq(self.seg_idx)
attn_ks = txt_lengths.float() / mel_lengths.float()
focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data
phone_coverage_rate = get_phone_coverage_rate(
attn, src_padding_mask, src_seg_mask, target_padding_mask).mean()
diagonal_focus_rate, diag_mask = get_diagonal_focus_rate(
attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask)
logging_outputs[f'{prefix}fr'] = focus_rate.mean().data
logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data
logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data
def get_plot_dur_info(self, sample, model_out):
if hparams['dur_level'] == 'word':
T_txt = sample['word_lengths'].max()
dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0]
dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
txt = sample['ph_words'][0].split(" ")
else:
T_txt = sample['txt_tokens'].shape[1]
dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
txt = txt.split(" ")
return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}
def build_optimizer(self, model):
optimizer_gen = torch.optim.AdamW(
self.gen_params,
lr=hparams['lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
optimizer_disc = torch.optim.AdamW(
self.disc_params,
lr=hparams['disc_lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
**hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None
return [optimizer_gen, optimizer_disc]
def build_scheduler(self, optimizer):
return [
FastSpeechTask.build_scheduler(self, optimizer[0]), # Generator Scheduler
torch.optim.lr_scheduler.StepLR(optimizer=optimizer[1], # Discriminator Scheduler
**hparams["discriminator_scheduler_params"]),
]
def on_before_optimization(self, opt_idx):
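        # opt_idx 0: generator step - clip the duration-predictor, (optional) BERT,
        # and the remaining generator parameters as separate groups.
        # opt_idx 1: discriminator step - clip the discriminator parameters.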
if opt_idx == 0:
nn.utils.clip_grad_norm_(self.dp_params, hparams['clip_grad_norm'])
if self.use_bert:
nn.utils.clip_grad_norm_(self.bert_params, hparams['clip_grad_norm'])
nn.utils.clip_grad_norm_(self.gen_params_except_bert_and_dp, hparams['clip_grad_norm'])
else:
nn.utils.clip_grad_norm_(self.gen_params_except_dp, hparams['clip_grad_norm'])
else:
nn.utils.clip_grad_norm_(self.disc_params, hparams["clip_grad_norm"])
def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx):
if self.scheduler is not None:
self.scheduler[0].step(self.global_step // hparams['accumulate_grad_batches'])
self.scheduler[1].step(self.global_step // hparams['accumulate_grad_batches'])
############
# infer
############
def test_start(self):
super().test_start()
if hparams.get('save_attn', False):
os.makedirs(f'{self.gen_dir}/attn', exist_ok=True)
self.model.store_inverse_all()
def test_step(self, sample, batch_idx):
assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
outputs = self.run_model(sample, infer=True)
text = sample['text'][0]
item_name = sample['item_name'][0]
tokens = sample['txt_tokens'][0].cpu().numpy()
mel_gt = sample['mels'][0].cpu().numpy()
mel_pred = outputs['mel_out'][0].cpu().numpy()
mel2ph = sample['mel2ph'][0].cpu().numpy()
mel2ph_pred = None
str_phs = self.token_encoder.decode(tokens, strip_padding=True)
base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
if text is not None:
base_fn += text.replace(":", "$3A")[:80]
base_fn = base_fn.replace(' ', '_')
gen_dir = self.gen_dir
wav_pred = self.vocoder.spec2wav(mel_pred)
self.saving_result_pool.add_job(self.save_result, args=[
wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
        wav_gt = self.vocoder.spec2wav(mel_gt)  # also needed below as the f0 DTW reference
        if hparams['save_gt']:
            self.saving_result_pool.add_job(self.save_result, args=[
                wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
        if hparams.get('save_attn', False):
            attn = outputs['attn'][0].cpu().numpy()
            np.save(f'{gen_dir}/attn/{item_name}.npy', attn)
        # save f0 for pitch dtw
        os.makedirs(f'{gen_dir}/f0', exist_ok=True)
        f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
        f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
        np.save(f'{gen_dir}/f0/{item_name}.npy', f0_pred_)
        np.save(f'{gen_dir}/f0/{item_name}_gt.npy', f0_gt_)
print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
return {
'item_name': item_name,
'text': text,
'ph_tokens': self.token_encoder.decode(tokens.tolist()),
'wav_fn_pred': base_fn % 'P',
'wav_fn_gt': base_fn % 'G',
}
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/ps_adv.py |
import filecmp
import matplotlib
from utils.plot import spec_to_figure
matplotlib.use('Agg')
from data_gen.tts.data_gen_utils import get_pitch
from modules.fastspeech.tts_modules import mel2ph_to_dur
from tasks.tts.dataset_utils import BaseTTSDataset
from utils.tts_utils import sequence_mask
from multiprocessing.pool import Pool
from tasks.base_task import data_loader, BaseConcatDataset
from utils.common_schedulers import RSQRTSchedule, NoneSchedule
from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
import os
import numpy as np
from tqdm import tqdm
import torch.distributed as dist
from tasks.base_task import BaseTask
from utils.hparams import hparams
from utils.text_encoder import TokenTextEncoder
import json
import matplotlib.pyplot as plt
import torch
import torch.optim
import torch.utils.data
import utils
from utils import audio
import pandas as pd
class TTSBaseTask(BaseTask):
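    """Shared TTS task plumbing: phone encoder, size-bucketed dataloaders (optionally
    concatenating multiple train sets), vocoder-based result saving and mel/attention
    plotting utilities used by the concrete TTS tasks."""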
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_cls = BaseTTSDataset
self.max_tokens = hparams['max_tokens']
self.max_sentences = hparams['max_sentences']
self.max_valid_tokens = hparams['max_valid_tokens']
if self.max_valid_tokens == -1:
hparams['max_valid_tokens'] = self.max_valid_tokens = self.max_tokens
self.max_valid_sentences = hparams['max_valid_sentences']
if self.max_valid_sentences == -1:
hparams['max_valid_sentences'] = self.max_valid_sentences = self.max_sentences
self.vocoder = None
self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir'])
self.padding_idx = self.phone_encoder.pad()
self.eos_idx = self.phone_encoder.eos()
self.seg_idx = self.phone_encoder.seg()
self.saving_result_pool = None
self.saving_results_futures = None
self.stats = {}
@data_loader
def train_dataloader(self):
if hparams['train_sets'] != '':
train_sets = hparams['train_sets'].split("|")
# check if all train_sets have the same spk map and dictionary
binary_data_dir = hparams['binary_data_dir']
file_to_cmp = ['phone_set.json']
if os.path.exists(f'{binary_data_dir}/word_set.json'):
file_to_cmp.append('word_set.json')
if hparams['use_spk_id']:
file_to_cmp.append('spk_map.json')
for f in file_to_cmp:
for ds_name in train_sets:
base_file = os.path.join(binary_data_dir, f)
ds_file = os.path.join(ds_name, f)
assert filecmp.cmp(base_file, ds_file), \
f'{f} in {ds_name} is not same with that in {binary_data_dir}.'
train_dataset = BaseConcatDataset([
self.dataset_cls(prefix='train', shuffle=True, data_dir=ds_name) for ds_name in train_sets])
else:
train_dataset = self.dataset_cls(prefix=hparams['train_set_name'], shuffle=True)
return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences,
endless=hparams['endless_ds'])
@data_loader
def val_dataloader(self):
valid_dataset = self.dataset_cls(prefix=hparams['valid_set_name'], shuffle=False)
return self.build_dataloader(valid_dataset, False, self.max_valid_tokens, self.max_valid_sentences)
@data_loader
def test_dataloader(self):
test_dataset = self.dataset_cls(prefix=hparams['test_set_name'], shuffle=False)
self.test_dl = self.build_dataloader(
test_dataset, False, self.max_valid_tokens,
self.max_valid_sentences, batch_by_size=False)
return self.test_dl
def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None,
required_batch_size_multiple=-1, endless=False, batch_by_size=True):
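        """Build a dataloader with size-aware batching.

        When batch_by_size is True, batches are formed by utils.batch_by_size under
        max_tokens / max_sentences budgets (both scaled by the visible GPU count);
        otherwise fixed-size batches of max_sentences items are used. Under DDP the
        pre-built batches are sharded across ranks.
        """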
devices_cnt = torch.cuda.device_count()
if devices_cnt == 0:
devices_cnt = 1
if required_batch_size_multiple == -1:
required_batch_size_multiple = devices_cnt
def shuffle_batches(batches):
np.random.shuffle(batches)
return batches
if max_tokens is not None:
max_tokens *= devices_cnt
if max_sentences is not None:
max_sentences *= devices_cnt
indices = dataset.ordered_indices()
if batch_by_size:
batch_sampler = utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
else:
batch_sampler = []
for i in range(0, len(indices), max_sentences):
batch_sampler.append(indices[i:i + max_sentences])
if shuffle:
batches = shuffle_batches(list(batch_sampler))
if endless:
batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))]
else:
batches = batch_sampler
if endless:
batches = [b for _ in range(1000) for b in batches]
num_workers = dataset.num_workers
if self.trainer.use_ddp:
num_replicas = dist.get_world_size()
rank = dist.get_rank()
batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
return torch.utils.data.DataLoader(dataset,
collate_fn=dataset.collater,
batch_sampler=batches,
num_workers=num_workers,
pin_memory=False)
def build_phone_encoder(self, data_dir):
phone_list_file = os.path.join(data_dir, 'phone_set.json')
phone_list = json.load(open(phone_list_file))
return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
def build_scheduler(self, optimizer):
if hparams['scheduler'] == 'rsqrt':
return RSQRTSchedule(optimizer)
else:
return NoneSchedule(optimizer)
def build_optimizer(self, model):
self.optimizer = optimizer = torch.optim.AdamW(
model.parameters(),
lr=hparams['lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
return optimizer
def plot_mel(self, batch_idx, spec, spec_out, name=None):
spec_cat = torch.cat([spec, spec_out], -1)
name = f'mel_{batch_idx}' if name is None else name
vmin = hparams['mel_vmin']
vmax = hparams['mel_vmax']
self.logger.add_figure(name, spec_to_figure(spec_cat[0], vmin, vmax), self.global_step)
def test_start(self):
self.saving_result_pool = Pool(min(int(os.getenv('N_PROC', os.cpu_count())), 16))
self.saving_results_futures = []
self.results_id = 0
self.gen_dir = os.path.join(
hparams['work_dir'],
f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
def after_infer(self, predictions, sil_start_frame=0):
predictions = utils.unpack_dict_to_list(predictions)
assert len(predictions) == 1, 'Only support batch_size=1 in inference.'
prediction = predictions[0]
prediction = utils.tensors_to_np(prediction)
item_name = prediction.get('item_name')
text = prediction.get('text')
ph_tokens = prediction.get('txt_tokens')
mel_gt = prediction["mels"]
        mel2ph_gt = prediction.get("mel2ph")
mel_pred = prediction["outputs"]
mel2ph_pred = prediction.get("mel2ph_pred")
f0_gt = prediction.get("f0")
f0_pred = prediction.get("f0_pred")
str_phs = None
if self.phone_encoder is not None and 'txt_tokens' in prediction:
str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True)
if 'encdec_attn' in prediction:
encdec_attn = prediction['encdec_attn']
encdec_attn = encdec_attn[encdec_attn.max(-1).sum(-1).argmax(-1)]
txt_lengths = prediction.get('txt_lengths')
encdec_attn = encdec_attn.T[:txt_lengths, :len(mel_gt)]
else:
encdec_attn = None
wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred)
wav_pred[:sil_start_frame * hparams['hop_size']] = 0
gen_dir = self.gen_dir
base_fn = f'[{self.results_id:06d}][{item_name}][%s]'
# if text is not None:
# base_fn += text.replace(":", "%3A")[:80]
base_fn = base_fn.replace(' ', '_')
if not hparams['profile_infer']:
os.makedirs(gen_dir, exist_ok=True)
os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
os.makedirs(f'{gen_dir}/plot', exist_ok=True)
if hparams.get('save_mel_npy', False):
os.makedirs(f'{gen_dir}/npy', exist_ok=True)
if 'encdec_attn' in prediction:
os.makedirs(f'{gen_dir}/attn_plot', exist_ok=True)
self.saving_results_futures.append(
self.saving_result_pool.apply_async(self.save_result, args=[
wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred, encdec_attn]))
if mel_gt is not None and hparams['save_gt']:
wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
self.saving_results_futures.append(
self.saving_result_pool.apply_async(self.save_result, args=[
wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph_gt]))
if hparams['save_f0']:
import matplotlib.pyplot as plt
f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
fig = plt.figure()
plt.plot(f0_pred_, label=r'$\hat{f_0}$')
plt.plot(f0_gt_, label=r'$f_0$')
plt.legend()
plt.tight_layout()
plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png')
plt.close(fig)
print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
self.results_id += 1
return {
'item_name': item_name,
'text': text,
'ph_tokens': self.phone_encoder.decode(ph_tokens.tolist()),
'wav_fn_pred': base_fn % 'P',
'wav_fn_gt': base_fn % 'G',
}
@staticmethod
def save_result(wav_out, mel, base_fn, gen_dir, str_phs=None, mel2ph=None, alignment=None):
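        """Save a generated waveform and its mel plot (with an f0 overlay and, when
        mel2ph/str_phs are given, per-token duration annotations); optionally dump the
        mel as .npy and plot the encoder-decoder alignment."""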
audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'],
norm=hparams['out_wav_norm'])
fig = plt.figure(figsize=(14, 10))
spec_vmin = hparams['mel_vmin']
spec_vmax = hparams['mel_vmax']
heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax)
fig.colorbar(heatmap)
f0, _ = get_pitch(wav_out, mel, hparams)
f0 = f0 / 10 * (f0 > 0)
plt.plot(f0, c='white', linewidth=1, alpha=0.6)
if mel2ph is not None and str_phs is not None:
decoded_txt = str_phs.split(" ")
dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy()
dur = [0] + list(np.cumsum(dur))
for i in range(len(dur) - 1):
shift = (i % 20) + 1
plt.text(dur[i], shift, decoded_txt[i])
plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black')
plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black',
alpha=1, linewidth=1)
plt.tight_layout()
plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png')
plt.close(fig)
if hparams.get('save_mel_npy', False):
np.save(f'{gen_dir}/npy/{base_fn}', mel)
if alignment is not None:
fig, ax = plt.subplots(figsize=(12, 16))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
decoded_txt = str_phs.split(" ")
ax.set_yticks(np.arange(len(decoded_txt)))
ax.set_yticklabels(list(decoded_txt), fontsize=6)
fig.colorbar(im, ax=ax)
fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png')
plt.close(fig)
def test_end(self, outputs):
pd.DataFrame(outputs).to_csv(f'{self.gen_dir}/meta.csv')
self.saving_result_pool.close()
[f.get() for f in tqdm(self.saving_results_futures)]
self.saving_result_pool.join()
return {}
##########
# utils
##########
def weights_nonzero_speech(self, target):
# target : B x T x mel
# Assign weight 1.0 to all labels except for padding (id=0).
dim = target.size(-1)
return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)
def make_stop_target(self, target):
# target : B x T x mel
seq_mask = target.abs().sum(-1).ne(0).float()
seq_length = seq_mask.sum(1)
mask_r = 1 - sequence_mask(seq_length - 1, target.size(1)).float()
return seq_mask, mask_r
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/tts_base.py |
from multiprocessing.pool import Pool
import matplotlib
from utils.pl_utils import data_loader
from utils.training_utils import RSQRTSchedule
from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
from modules.fastspeech.pe import PitchExtractor
matplotlib.use('Agg')
import os
import numpy as np
from tqdm import tqdm
import torch.distributed as dist
from tasks.base_task import BaseTask
from utils.hparams import hparams
from utils.text_encoder import TokenTextEncoder
import json
import torch
import torch.optim
import torch.utils.data
import utils
class TtsTask(BaseTask):
def __init__(self, *args, **kwargs):
self.vocoder = None
self.phone_encoder = self.build_phone_encoder(hparams['binary_data_dir'])
self.padding_idx = self.phone_encoder.pad()
self.eos_idx = self.phone_encoder.eos()
self.seg_idx = self.phone_encoder.seg()
self.saving_result_pool = None
self.saving_results_futures = None
self.stats = {}
super().__init__(*args, **kwargs)
def build_scheduler(self, optimizer):
return RSQRTSchedule(optimizer)
def build_optimizer(self, model):
self.optimizer = optimizer = torch.optim.AdamW(
model.parameters(),
lr=hparams['lr'])
return optimizer
def build_dataloader(self, dataset, shuffle, max_tokens=None, max_sentences=None,
required_batch_size_multiple=-1, endless=False, batch_by_size=True):
devices_cnt = torch.cuda.device_count()
if devices_cnt == 0:
devices_cnt = 1
if required_batch_size_multiple == -1:
required_batch_size_multiple = devices_cnt
def shuffle_batches(batches):
np.random.shuffle(batches)
return batches
if max_tokens is not None:
max_tokens *= devices_cnt
if max_sentences is not None:
max_sentences *= devices_cnt
indices = dataset.ordered_indices()
if batch_by_size:
batch_sampler = utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
else:
batch_sampler = []
for i in range(0, len(indices), max_sentences):
batch_sampler.append(indices[i:i + max_sentences])
if shuffle:
batches = shuffle_batches(list(batch_sampler))
if endless:
batches = [b for _ in range(1000) for b in shuffle_batches(list(batch_sampler))]
else:
batches = batch_sampler
if endless:
batches = [b for _ in range(1000) for b in batches]
num_workers = dataset.num_workers
if self.trainer.use_ddp:
num_replicas = dist.get_world_size()
rank = dist.get_rank()
batches = [x[rank::num_replicas] for x in batches if len(x) % num_replicas == 0]
return torch.utils.data.DataLoader(dataset,
collate_fn=dataset.collater,
batch_sampler=batches,
num_workers=num_workers,
pin_memory=False)
def build_phone_encoder(self, data_dir):
phone_list_file = os.path.join(data_dir, 'phone_set.json')
phone_list = json.load(open(phone_list_file))
return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
def test_start(self):
self.saving_result_pool = Pool(8)
self.saving_results_futures = []
self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
self.pe = PitchExtractor().cuda()
utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
self.pe.eval()
def test_end(self, outputs):
self.saving_result_pool.close()
[f.get() for f in tqdm(self.saving_results_futures)]
self.saving_result_pool.join()
return {}
##########
# utils
##########
def weights_nonzero_speech(self, target):
# target : B x T x mel
# Assign weight 1.0 to all labels except for padding (id=0).
dim = target.size(-1)
return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)
if __name__ == '__main__':
TtsTask.start()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/tts.py |
from utils.cwt import get_lf0_cwt
import torch.optim
import torch.utils.data
import importlib
from utils.indexed_datasets import IndexedDataset
from utils.pitch_utils import norm_interp_f0, denorm_f0, f0_to_coarse
import numpy as np
from tasks.base_task import BaseDataset
import torch
import utils
import torch.distributions
from utils.hparams import hparams
from resemblyzer import VoiceEncoder
import json
from data_gen.tts.data_gen_utils import build_phone_encoder
class BaseTTSDataset(BaseDataset):
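    """Dataset over binarized TTS items (IndexedDataset): yields phone tokens and mel
    frames, limits test items via num_test_samples / test_ids, and filters out items
    shorter than min_frames."""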
def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None):
super().__init__(shuffle)
self.data_dir = hparams['binary_data_dir'] if data_dir is None else data_dir
self.prefix = prefix
self.hparams = hparams
self.indexed_ds = None
self.ext_mel2ph = None
def load_size():
self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy')
if prefix == 'test':
if test_items is not None:
self.indexed_ds, self.sizes = test_items, test_sizes
else:
load_size()
if hparams['num_test_samples'] > 0:
self.avail_idxs = [x for x in range(hparams['num_test_samples']) \
if x < len(self.sizes)]
if len(hparams['test_ids']) > 0:
self.avail_idxs = hparams['test_ids'] + self.avail_idxs
else:
self.avail_idxs = list(range(len(self.sizes)))
else:
load_size()
self.avail_idxs = list(range(len(self.sizes)))
if hparams['min_frames'] > 0:
self.avail_idxs = [
x for x in self.avail_idxs if self.sizes[x] >= hparams['min_frames']]
self.sizes = [self.sizes[i] for i in self.avail_idxs]
def _get_item(self, index):
if hasattr(self, 'avail_idxs') and self.avail_idxs is not None:
index = self.avail_idxs[index]
if self.indexed_ds is None:
self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}')
return self.indexed_ds[index]
def __getitem__(self, index):
hparams = self.hparams
item = self._get_item(index)
assert len(item['mel']) == self.sizes[index], (len(item['mel']), self.sizes[index])
max_frames = hparams['max_frames']
spec = torch.Tensor(item['mel'])[:max_frames]
max_frames = spec.shape[0] // hparams['frames_multiple'] * hparams['frames_multiple']
spec = spec[:max_frames]
phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']])
sample = {
"id": index,
"item_name": item['item_name'],
"text": item['txt'],
"txt_token": phone,
"mel": spec,
"mel_nonpadding": spec.abs().sum(-1) > 0,
}
if hparams['use_spk_embed']:
sample["spk_embed"] = torch.Tensor(item['spk_embed'])
if hparams['use_spk_id']:
sample["spk_id"] = int(item['spk_id'])
return sample
def collater(self, samples):
if len(samples) == 0:
return {}
hparams = self.hparams
id = torch.LongTensor([s['id'] for s in samples])
item_names = [s['item_name'] for s in samples]
text = [s['text'] for s in samples]
txt_tokens = utils.collate_1d([s['txt_token'] for s in samples], 0)
mels = utils.collate_2d([s['mel'] for s in samples], 0.0)
txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples])
mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples])
batch = {
'id': id,
'item_name': item_names,
'nsamples': len(samples),
'text': text,
'txt_tokens': txt_tokens,
'txt_lengths': txt_lengths,
'mels': mels,
'mel_lengths': mel_lengths,
}
if hparams['use_spk_embed']:
spk_embed = torch.stack([s['spk_embed'] for s in samples])
batch['spk_embed'] = spk_embed
if hparams['use_spk_id']:
spk_ids = torch.LongTensor([s['spk_id'] for s in samples])
batch['spk_ids'] = spk_ids
return batch
class FastSpeechDataset(BaseTTSDataset):
def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None):
super().__init__(prefix, shuffle, test_items, test_sizes, data_dir)
self.f0_mean, self.f0_std = hparams.get('f0_mean', None), hparams.get('f0_std', None)
if prefix == 'test' and hparams['test_input_dir'] != '':
self.data_dir = hparams['test_input_dir']
self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}')
self.indexed_ds = sorted(self.indexed_ds, key=lambda item: item['item_name'])
items = {}
for i in range(len(self.indexed_ds)):
speaker = self.indexed_ds[i]['item_name'].split('_')[0]
if speaker not in items.keys():
items[speaker] = [i]
else:
items[speaker].append(i)
sort_item = sorted(items.values(), key=lambda item_pre_speaker: len(item_pre_speaker), reverse=True)
self.avail_idxs = [n for a in sort_item for n in a][:hparams['num_test_samples']]
self.indexed_ds, self.sizes = self.load_test_inputs()
self.avail_idxs = [i for i in range(hparams['num_test_samples'])]
if hparams['pitch_type'] == 'cwt':
_, hparams['cwt_scales'] = get_lf0_cwt(np.ones(10))
def __getitem__(self, index):
sample = super(FastSpeechDataset, self).__getitem__(index)
item = self._get_item(index)
hparams = self.hparams
max_frames = hparams['max_frames']
spec = sample['mel']
T = spec.shape[0]
phone = sample['txt_token']
sample['energy'] = (spec.exp() ** 2).sum(-1).sqrt()
sample['mel2ph'] = mel2ph = torch.LongTensor(item['mel2ph'])[:T] if 'mel2ph' in item else None
if hparams['use_pitch_embed']:
assert 'f0' in item
if hparams.get('normalize_pitch', False):
f0 = item["f0"]
                if (f0 > 0).any() and f0[f0 > 0].std() > 0:
f0[f0 > 0] = (f0[f0 > 0] - f0[f0 > 0].mean()) / f0[f0 > 0].std() * hparams['f0_std'] + \
hparams['f0_mean']
f0[f0 > 0] = f0[f0 > 0].clip(min=60, max=500)
pitch = f0_to_coarse(f0)
pitch = torch.LongTensor(pitch[:max_frames])
else:
pitch = torch.LongTensor(item.get("pitch"))[:max_frames] if "pitch" in item else None
f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams)
uv = torch.FloatTensor(uv)
f0 = torch.FloatTensor(f0)
if hparams['pitch_type'] == 'cwt':
cwt_spec = torch.Tensor(item['cwt_spec'])[:max_frames]
f0_mean = item.get('f0_mean', item.get('cwt_mean'))
f0_std = item.get('f0_std', item.get('cwt_std'))
sample.update({"cwt_spec": cwt_spec, "f0_mean": f0_mean, "f0_std": f0_std})
elif hparams['pitch_type'] == 'ph':
if "f0_ph" in item:
f0 = torch.FloatTensor(item['f0_ph'])
else:
f0 = denorm_f0(f0, None, hparams)
f0_phlevel_sum = torch.zeros_like(phone).float().scatter_add(0, mel2ph - 1, f0)
f0_phlevel_num = torch.zeros_like(phone).float().scatter_add(
0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1)
f0_ph = f0_phlevel_sum / f0_phlevel_num
f0, uv = norm_interp_f0(f0_ph, hparams)
else:
f0 = uv = torch.zeros_like(mel2ph)
pitch = None
sample["f0"], sample["uv"], sample["pitch"] = f0, uv, pitch
if hparams['use_spk_embed']:
sample["spk_embed"] = torch.Tensor(item['spk_embed'])
if hparams['use_spk_id']:
sample["spk_id"] = item['spk_id']
return sample
def collater(self, samples):
if len(samples) == 0:
return {}
hparams = self.hparams
batch = super(FastSpeechDataset, self).collater(samples)
f0 = utils.collate_1d([s['f0'] for s in samples], 0.0)
pitch = utils.collate_1d([s['pitch'] for s in samples]) if samples[0]['pitch'] is not None else None
uv = utils.collate_1d([s['uv'] for s in samples])
energy = utils.collate_1d([s['energy'] for s in samples], 0.0)
mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \
if samples[0]['mel2ph'] is not None else None
batch.update({
'mel2ph': mel2ph,
'energy': energy,
'pitch': pitch,
'f0': f0,
'uv': uv,
})
if hparams['pitch_type'] == 'cwt':
cwt_spec = utils.collate_2d([s['cwt_spec'] for s in samples])
f0_mean = torch.Tensor([s['f0_mean'] for s in samples])
f0_std = torch.Tensor([s['f0_std'] for s in samples])
batch.update({'cwt_spec': cwt_spec, 'f0_mean': f0_mean, 'f0_std': f0_std})
return batch
def load_test_inputs(self):
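        """Re-binarize test items from hparams['test_input_dir'] on the fly using the
        configured binarizer class and MFA TextGrid alignments, optionally attaching a
        resemblyzer speaker embedding per utterance."""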
        binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
pkg = ".".join(binarizer_cls.split(".")[:-1])
cls_name = binarizer_cls.split(".")[-1]
binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
ph_set = json.load(open(ph_set_fn, 'r'))
print("| phone set: ", ph_set)
phone_encoder = build_phone_encoder(hparams['binary_data_dir'])
word_encoder = None
voice_encoder = VoiceEncoder().cuda()
encoder = [phone_encoder, word_encoder]
sizes = []
items = []
for i in range(len(self.avail_idxs)):
item = self._get_item(i)
item2tgfn = f"{hparams['test_input_dir'].replace('binary', 'processed')}/mfa_outputs/{item['item_name']}.TextGrid"
item = binarizer_cls.process_item(item['item_name'], item['ph'], item['txt'], item2tgfn,
item['wav_fn'], item['spk_id'], encoder, hparams['binarization_args'])
            item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \
                if hparams['binarization_args']['with_spk_embed'] else None  # only compute the speaker embedding when enabled
items.append(item)
sizes.append(item['len'])
return items, sizes
class FastSpeechWordDataset(FastSpeechDataset):
def __getitem__(self, index):
sample = super(FastSpeechWordDataset, self).__getitem__(index)
item = self._get_item(index)
max_frames = hparams['max_frames']
sample["ph_words"] = item["ph_words"]
sample["word_tokens"] = torch.LongTensor(item["word_tokens"])
sample["mel2word"] = torch.LongTensor(item.get("mel2word"))[:max_frames]
sample["ph2word"] = torch.LongTensor(item['ph2word'][:hparams['max_input_tokens']])
return sample
def collater(self, samples):
batch = super(FastSpeechWordDataset, self).collater(samples)
ph_words = [s['ph_words'] for s in samples]
batch['ph_words'] = ph_words
word_tokens = utils.collate_1d([s['word_tokens'] for s in samples], 0)
batch['word_tokens'] = word_tokens
mel2word = utils.collate_1d([s['mel2word'] for s in samples], 0)
batch['mel2word'] = mel2word
ph2word = utils.collate_1d([s['ph2word'] for s in samples], 0)
batch['ph2word'] = ph2word
return batch
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/dataset_utils.py |
import importlib
from data_gen.tts.base_binarizer import BaseBinarizer
from data_gen.tts.base_preprocess import BasePreprocessor
from data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls
from utils.hparams import hparams
def parse_dataset_configs():
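    """Read the max_tokens / max_sentences batch budgets from hparams; a value of -1
    for the validation budgets means "reuse the training budget"."""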
max_tokens = hparams['max_tokens']
max_sentences = hparams['max_sentences']
max_valid_tokens = hparams['max_valid_tokens']
if max_valid_tokens == -1:
hparams['max_valid_tokens'] = max_valid_tokens = max_tokens
max_valid_sentences = hparams['max_valid_sentences']
if max_valid_sentences == -1:
hparams['max_valid_sentences'] = max_valid_sentences = max_sentences
return max_tokens, max_sentences, max_valid_tokens, max_valid_sentences
def parse_mel_losses():
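    """Parse hparams['mel_losses'], a '|'-separated list of loss names with optional
    ':weight' suffixes (e.g. 'l1:0.5|ssim:0.5'); names without a weight default to 1.0."""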
mel_losses = hparams['mel_losses'].split("|")
loss_and_lambda = {}
for i, l in enumerate(mel_losses):
if l == '':
continue
if ':' in l:
l, lbd = l.split(":")
lbd = float(lbd)
else:
lbd = 1.0
loss_and_lambda[l] = lbd
print("| Mel losses:", loss_and_lambda)
return loss_and_lambda
def load_data_preprocessor():
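    """Instantiate the preprocessor named by the dotted path in hparams['preprocess_cls']
    via importlib, and return it together with a copy of hparams['preprocess_args']."""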
preprocess_cls = hparams["preprocess_cls"]
pkg = ".".join(preprocess_cls.split(".")[:-1])
cls_name = preprocess_cls.split(".")[-1]
preprocessor: BasePreprocessor = getattr(importlib.import_module(pkg), cls_name)()
preprocess_args = {}
preprocess_args.update(hparams['preprocess_args'])
return preprocessor, preprocess_args
def load_data_binarizer():
binarizer_cls = hparams['binarizer_cls']
pkg = ".".join(binarizer_cls.split(".")[:-1])
cls_name = binarizer_cls.split(".")[-1]
binarizer: BaseBinarizer = getattr(importlib.import_module(pkg), cls_name)()
binarization_args = {}
binarization_args.update(hparams['binarization_args'])
return binarizer, binarization_args | EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/tts_utils.py |
from tasks.tts.fs2 import FastSpeech2Task
from modules.syntaspeech.multi_window_disc import Discriminator
from utils.hparams import hparams
from torch import nn
import torch
import torch.optim
import torch.utils.data
import utils
class FastSpeech2AdvTask(FastSpeech2Task):
def build_model(self):
self.build_tts_model()
if hparams['load_ckpt'] != '':
self.load_ckpt(hparams['load_ckpt'], strict=False)
utils.print_arch(self.model, 'Generator')
self.build_disc_model()
if not hasattr(self, 'gen_params'):
self.gen_params = list(self.model.parameters())
return self.model
def build_disc_model(self):
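        # Multi-window mel discriminator over 80 mel bins; the time_lengths argument
        # suggests it judges fixed-length mel windows of 32/64/128 frames, truncated to
        # the first disc_win_num window sizes (internal behavior assumed from the args).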
disc_win_num = hparams['disc_win_num']
h = hparams['mel_disc_hidden_size']
self.mel_disc = Discriminator(
time_lengths=[32, 64, 128][:disc_win_num],
freq_length=80, hidden_size=h, kernel=(3, 3)
)
self.disc_params = list(self.mel_disc.parameters())
utils.print_arch(self.mel_disc, model_name='Mel Disc')
def _training_step(self, sample, batch_idx, optimizer_idx):
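        """Alternate generator / discriminator updates with least-squares (LSGAN-style) objectives.

        optimizer_idx == 0: run the TTS generator, add mel losses and, once
        disc_start_steps is reached, adversarial terms weighted by lambda_mel_adv.
        optimizer_idx == 1: every disc_interval steps, train the mel discriminator on
        real vs. generated mels (optionally re-running the generator when rerun_gen is set).
        """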
log_outputs = {}
loss_weights = {}
disc_start = hparams['mel_gan'] and self.global_step >= hparams["disc_start_steps"] and \
hparams['lambda_mel_adv'] > 0
if optimizer_idx == 0:
#######################
# Generator #
#######################
log_outputs, model_out = self.run_model(self.model, sample, return_output=True)
self.model_out = {k: v.detach() for k, v in model_out.items() if isinstance(v, torch.Tensor)}
if disc_start:
self.disc_cond = disc_cond = self.model_out['decoder_inp'].detach() \
if hparams['use_cond_disc'] else None
if hparams['mel_loss_no_noise']:
self.add_mel_loss(model_out['mel_out_nonoise'], sample['mels'], log_outputs)
mel_p = model_out['mel_out']
if hasattr(self.model, 'out2mel'):
mel_p = self.model.out2mel(mel_p)
o_ = self.mel_disc(mel_p, disc_cond)
p_, pc_ = o_['y'], o_['y_c']
if p_ is not None:
log_outputs['a'] = self.mse_loss_fn(p_, p_.new_ones(p_.size()))
loss_weights['a'] = hparams['lambda_mel_adv']
if pc_ is not None:
log_outputs['ac'] = self.mse_loss_fn(pc_, pc_.new_ones(pc_.size()))
loss_weights['ac'] = hparams['lambda_mel_adv']
else:
#######################
# Discriminator #
#######################
if disc_start and self.global_step % hparams['disc_interval'] == 0:
if hparams['rerun_gen']:
with torch.no_grad():
_, model_out = self.run_model(self.model, sample, return_output=True)
else:
model_out = self.model_out
mel_g = sample['mels']
mel_p = model_out['mel_out']
if hasattr(self.model, 'out2mel'):
mel_p = self.model.out2mel(mel_p)
o = self.mel_disc(mel_g, self.disc_cond)
p, pc = o['y'], o['y_c']
o_ = self.mel_disc(mel_p, self.disc_cond)
p_, pc_ = o_['y'], o_['y_c']
if p_ is not None:
log_outputs["r"] = self.mse_loss_fn(p, p.new_ones(p.size()))
log_outputs["f"] = self.mse_loss_fn(p_, p_.new_zeros(p_.size()))
if pc_ is not None:
log_outputs["rc"] = self.mse_loss_fn(pc, pc.new_ones(pc.size()))
log_outputs["fc"] = self.mse_loss_fn(pc_, pc_.new_zeros(pc_.size()))
if len(log_outputs) == 0:
return None
total_loss = sum([loss_weights.get(k, 1) * v for k, v in log_outputs.items()])
log_outputs['bs'] = sample['mels'].shape[0]
return total_loss, log_outputs
def configure_optimizers(self):
if not hasattr(self, 'gen_params'):
self.gen_params = list(self.model.parameters())
optimizer_gen = torch.optim.AdamW(
self.gen_params,
lr=hparams['lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
optimizer_disc = torch.optim.AdamW(
self.disc_params,
lr=hparams['disc_lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
**hparams["discriminator_optimizer_params"]) if len(self.disc_params) > 0 else None
self.scheduler = self.build_scheduler({'gen': optimizer_gen, 'disc': optimizer_disc})
return [optimizer_gen, optimizer_disc]
def build_scheduler(self, optimizer):
return {
"gen": super().build_scheduler(optimizer['gen']),
"disc": torch.optim.lr_scheduler.StepLR(
optimizer=optimizer["disc"],
**hparams["discriminator_scheduler_params"]) if optimizer["disc"] is not None else None,
}
def on_before_optimization(self, opt_idx):
if opt_idx == 0:
nn.utils.clip_grad_norm_(self.gen_params, hparams['generator_grad_norm'])
else:
nn.utils.clip_grad_norm_(self.disc_params, hparams["discriminator_grad_norm"])
def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx):
if optimizer_idx == 0:
self.scheduler['gen'].step(self.global_step)
else:
self.scheduler['disc'].step(max(self.global_step - hparams["disc_start_steps"], 1))
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/fs2_adv.py |
import matplotlib
matplotlib.use('Agg')
from utils import audio
import matplotlib.pyplot as plt
from data_gen.tts.data_gen_utils import get_pitch
from tasks.tts.fs2_utils import FastSpeechDataset
from utils.cwt import cwt2f0
from utils.pl_utils import data_loader
import os
from multiprocessing.pool import Pool
from tqdm import tqdm
from modules.fastspeech.tts_modules import mel2ph_to_dur
from utils.hparams import hparams
from utils.plot import spec_to_figure, dur_to_figure, f0_to_figure
from utils.pitch_utils import denorm_f0
from modules.fastspeech.fs2 import FastSpeech2
from tasks.tts.tts import TtsTask
import torch
import torch.optim
import torch.utils.data
import torch.nn.functional as F
import utils
import torch.distributions
import numpy as np
from modules.commons.ssim import ssim
class FastSpeech2Task(TtsTask):
def __init__(self):
super(FastSpeech2Task, self).__init__()
self.dataset_cls = FastSpeechDataset
self.mse_loss_fn = torch.nn.MSELoss()
mel_losses = hparams['mel_loss'].split("|")
self.loss_and_lambda = {}
for i, l in enumerate(mel_losses):
if l == '':
continue
if ':' in l:
l, lbd = l.split(":")
lbd = float(lbd)
else:
lbd = 1.0
self.loss_and_lambda[l] = lbd
print("| Mel losses:", self.loss_and_lambda)
self.sil_ph = self.phone_encoder.sil_phonemes()
@data_loader
def train_dataloader(self):
train_dataset = self.dataset_cls(hparams['train_set_name'], shuffle=True)
return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences,
endless=hparams['endless_ds'])
@data_loader
def val_dataloader(self):
valid_dataset = self.dataset_cls(hparams['valid_set_name'], shuffle=False)
return self.build_dataloader(valid_dataset, False, self.max_eval_tokens, self.max_eval_sentences)
@data_loader
def test_dataloader(self):
test_dataset = self.dataset_cls(hparams['test_set_name'], shuffle=False)
return self.build_dataloader(test_dataset, False, self.max_eval_tokens,
self.max_eval_sentences, batch_by_size=False)
def build_tts_model(self):
self.model = FastSpeech2(self.phone_encoder)
def build_model(self):
self.build_tts_model()
if hparams['load_ckpt'] != '':
self.load_ckpt(hparams['load_ckpt'], strict=True)
utils.print_arch(self.model)
return self.model
def _training_step(self, sample, batch_idx, _):
loss_output = self.run_model(self.model, sample)
total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad])
loss_output['batch_size'] = sample['txt_tokens'].size()[0]
return total_loss, loss_output
def validation_step(self, sample, batch_idx):
outputs = {}
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
mel_out = self.model.out2mel(model_out['mel_out'])
outputs = utils.tensors_to_scalars(outputs)
# if sample['mels'].shape[0] == 1:
# self.add_laplace_var(mel_out, sample['mels'], outputs)
if batch_idx < hparams['num_valid_plots']:
self.plot_mel(batch_idx, sample['mels'], mel_out)
self.plot_dur(batch_idx, sample, model_out)
if hparams['use_pitch_embed']:
self.plot_pitch(batch_idx, sample, model_out)
return outputs
def _validation_end(self, outputs):
all_losses_meter = {
'total_loss': utils.AvgrageMeter(),
}
for output in outputs:
n = output['nsamples']
for k, v in output['losses'].items():
if k not in all_losses_meter:
all_losses_meter[k] = utils.AvgrageMeter()
all_losses_meter[k].update(v, n)
all_losses_meter['total_loss'].update(output['total_loss'], n)
return {k: round(v.avg, 4) for k, v in all_losses_meter.items()}
def run_model(self, model, sample, return_output=False):
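        """Forward FastSpeech 2 with ground-truth alignment and prosody (mel2ph, f0/uv,
        energy, speaker embedding or id; for the 'cwt' pitch type the CWT spectrum is
        converted back to f0) and collect mel, duration, and optional pitch/energy losses."""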
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
mel2ph = sample['mel2ph'] # [B, T_s]
f0 = sample['f0']
uv = sample['uv']
energy = sample['energy']
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
if hparams['pitch_type'] == 'cwt':
cwt_spec = sample[f'cwt_spec']
f0_mean = sample['f0_mean']
f0_std = sample['f0_std']
sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False)
losses = {}
self.add_mel_loss(output['mel_out'], target, losses)
self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
if hparams['use_pitch_embed']:
self.add_pitch_loss(output, sample, losses)
if hparams['use_energy_embed']:
self.add_energy_loss(output['energy_pred'], energy, losses)
if not return_output:
return losses
else:
return losses, output
############
# losses
############
def add_mel_loss(self, mel_out, target, losses, postfix='', mel_mix_loss=None):
if mel_mix_loss is None:
for loss_name, lbd in self.loss_and_lambda.items():
if 'l1' == loss_name:
l = self.l1_loss(mel_out, target)
elif 'mse' == loss_name:
raise NotImplementedError
elif 'ssim' == loss_name:
l = self.ssim_loss(mel_out, target)
elif 'gdl' == loss_name:
raise NotImplementedError
losses[f'{loss_name}{postfix}'] = l * lbd
else:
raise NotImplementedError
def l1_loss(self, decoder_output, target):
# decoder_output : B x T x n_mel
# target : B x T x n_mel
l1_loss = F.l1_loss(decoder_output, target, reduction='none')
weights = self.weights_nonzero_speech(target)
l1_loss = (l1_loss * weights).sum() / weights.sum()
return l1_loss
def ssim_loss(self, decoder_output, target, bias=6.0):
# decoder_output : B x T x n_mel
# target : B x T x n_mel
assert decoder_output.shape == target.shape
weights = self.weights_nonzero_speech(target)
decoder_output = decoder_output[:, None] + bias
target = target[:, None] + bias
ssim_loss = 1 - ssim(decoder_output, target, size_average=False)
ssim_loss = (ssim_loss * weights).sum() / weights.sum()
return ssim_loss
def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, losses=None):
"""
:param dur_pred: [B, T], float, log scale
:param mel2ph: [B, T]
:param txt_tokens: [B, T]
:param losses:
:return:
"""
B, T = txt_tokens.shape
nonpadding = (txt_tokens != 0).float()
dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding
is_sil = torch.zeros_like(txt_tokens).bool()
for p in self.sil_ph:
is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0])
is_sil = is_sil.float() # [B, T_txt]
# phone duration loss
if hparams['dur_loss'] == 'mse':
losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none')
losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
dur_pred = (dur_pred.exp() - 1).clamp(min=0)
elif hparams['dur_loss'] == 'mog':
            raise NotImplementedError
elif hparams['dur_loss'] == 'crf':
losses['pdur'] = -self.model.dur_predictor.crf(
dur_pred, dur_gt.long().clamp(min=0, max=31), mask=nonpadding > 0, reduction='mean')
losses['pdur'] = losses['pdur'] * hparams['lambda_ph_dur']
# use linear scale for sent and word duration
if hparams['lambda_word_dur'] > 0:
word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long()
word_dur_p = dur_pred.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_pred)[:, 1:]
word_dur_g = dur_gt.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_gt)[:, 1:]
wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none')
word_nonpadding = (word_dur_g > 0).float()
wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum()
losses['wdur'] = wdur_loss * hparams['lambda_word_dur']
if hparams['lambda_sent_dur'] > 0:
sent_dur_p = dur_pred.sum(-1)
sent_dur_g = dur_gt.sum(-1)
sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean')
losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
def add_pitch_loss(self, output, sample, losses):
if hparams['pitch_type'] == 'ph':
nonpadding = (sample['txt_tokens'] != 0).float()
pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss
losses['f0'] = (pitch_loss_fn(output['pitch_pred'][:, :, 0], sample['f0'],
reduction='none') * nonpadding).sum() \
/ nonpadding.sum() * hparams['lambda_f0']
return
mel2ph = sample['mel2ph'] # [B, T_s]
f0 = sample['f0']
uv = sample['uv']
nonpadding = (mel2ph != 0).float()
if hparams['pitch_type'] == 'cwt':
cwt_spec = sample[f'cwt_spec']
f0_mean = sample['f0_mean']
f0_std = sample['f0_std']
cwt_pred = output['cwt'][:, :, :10]
f0_mean_pred = output['f0_mean']
f0_std_pred = output['f0_std']
losses['C'] = self.cwt_loss(cwt_pred, cwt_spec) * hparams['lambda_f0']
if hparams['use_uv']:
assert output['cwt'].shape[-1] == 11
uv_pred = output['cwt'][:, :, -1]
losses['uv'] = (F.binary_cross_entropy_with_logits(uv_pred, uv, reduction='none') * nonpadding) \
.sum() / nonpadding.sum() * hparams['lambda_uv']
losses['f0_mean'] = F.l1_loss(f0_mean_pred, f0_mean) * hparams['lambda_f0']
losses['f0_std'] = F.l1_loss(f0_std_pred, f0_std) * hparams['lambda_f0']
if hparams['cwt_add_f0_loss']:
f0_cwt_ = self.model.cwt2f0_norm(cwt_pred, f0_mean_pred, f0_std_pred, mel2ph)
self.add_f0_loss(f0_cwt_[:, :, None], f0, uv, losses, nonpadding=nonpadding)
elif hparams['pitch_type'] == 'frame':
self.add_f0_loss(output['pitch_pred'], f0, uv, losses, nonpadding=nonpadding)
def add_f0_loss(self, p_pred, f0, uv, losses, nonpadding):
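        # p_pred[..., 0]: f0 regression (l1/l2 on voiced, non-padded frames);
        # p_pred[..., 1]: voiced/unvoiced logits trained with BCE when use_uv is set.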
assert p_pred[..., 0].shape == f0.shape
if hparams['use_uv']:
assert p_pred[..., 1].shape == uv.shape
losses['uv'] = (F.binary_cross_entropy_with_logits(
p_pred[:, :, 1], uv, reduction='none') * nonpadding).sum() \
/ nonpadding.sum() * hparams['lambda_uv']
nonpadding = nonpadding * (uv == 0).float()
f0_pred = p_pred[:, :, 0]
if hparams['pitch_loss'] in ['l1', 'l2']:
pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss
losses['f0'] = (pitch_loss_fn(f0_pred, f0, reduction='none') * nonpadding).sum() \
/ nonpadding.sum() * hparams['lambda_f0']
elif hparams['pitch_loss'] == 'ssim':
            raise NotImplementedError
def cwt_loss(self, cwt_p, cwt_g):
if hparams['cwt_loss'] == 'l1':
return F.l1_loss(cwt_p, cwt_g)
if hparams['cwt_loss'] == 'l2':
return F.mse_loss(cwt_p, cwt_g)
if hparams['cwt_loss'] == 'ssim':
return self.ssim_loss(cwt_p, cwt_g, 20)
def add_energy_loss(self, energy_pred, energy, losses):
nonpadding = (energy != 0).float()
loss = (F.mse_loss(energy_pred, energy, reduction='none') * nonpadding).sum() / nonpadding.sum()
loss = loss * hparams['lambda_energy']
losses['e'] = loss
############
# validation plots
############
def plot_mel(self, batch_idx, spec, spec_out, name=None):
spec_cat = torch.cat([spec, spec_out], -1)
name = f'mel_{batch_idx}' if name is None else name
vmin = hparams['mel_vmin']
vmax = hparams['mel_vmax']
self.logger.experiment.add_figure(name, spec_to_figure(spec_cat[0], vmin, vmax), self.global_step)
def plot_dur(self, batch_idx, sample, model_out):
T_txt = sample['txt_tokens'].shape[1]
dur_gt = mel2ph_to_dur(sample['mel2ph'], T_txt)[0]
dur_pred = self.model.dur_predictor.out2dur(model_out['dur']).float()
txt = self.phone_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
txt = txt.split(" ")
self.logger.experiment.add_figure(
f'dur_{batch_idx}', dur_to_figure(dur_gt, dur_pred, txt), self.global_step)
def plot_pitch(self, batch_idx, sample, model_out):
f0 = sample['f0']
if hparams['pitch_type'] == 'ph':
mel2ph = sample['mel2ph']
f0 = self.expand_f0_ph(f0, mel2ph)
f0_pred = self.expand_f0_ph(model_out['pitch_pred'][:, :, 0], mel2ph)
self.logger.experiment.add_figure(
f'f0_{batch_idx}', f0_to_figure(f0[0], None, f0_pred[0]), self.global_step)
return
f0 = denorm_f0(f0, sample['uv'], hparams)
if hparams['pitch_type'] == 'cwt':
# cwt
cwt_out = model_out['cwt']
cwt_spec = cwt_out[:, :, :10]
cwt = torch.cat([cwt_spec, sample['cwt_spec']], -1)
self.logger.experiment.add_figure(f'cwt_{batch_idx}', spec_to_figure(cwt[0]), self.global_step)
# f0
f0_pred = cwt2f0(cwt_spec, model_out['f0_mean'], model_out['f0_std'], hparams['cwt_scales'])
if hparams['use_uv']:
assert cwt_out.shape[-1] == 11
uv_pred = cwt_out[:, :, -1] > 0
f0_pred[uv_pred > 0] = 0
f0_cwt = denorm_f0(sample['f0_cwt'], sample['uv'], hparams)
self.logger.experiment.add_figure(
f'f0_{batch_idx}', f0_to_figure(f0[0], f0_cwt[0], f0_pred[0]), self.global_step)
elif hparams['pitch_type'] == 'frame':
# f0
uv_pred = model_out['pitch_pred'][:, :, 1] > 0
pitch_pred = denorm_f0(model_out['pitch_pred'][:, :, 0], uv_pred, hparams)
self.logger.experiment.add_figure(
f'f0_{batch_idx}', f0_to_figure(f0[0], None, pitch_pred[0]), self.global_step)
############
# infer
############
def test_step(self, sample, batch_idx):
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
txt_tokens = sample['txt_tokens']
mel2ph, uv, f0 = None, None, None
ref_mels = None
if hparams['profile_infer']:
pass
else:
if hparams['use_gt_dur']:
mel2ph = sample['mel2ph']
if hparams['use_gt_f0']:
f0 = sample['f0']
uv = sample['uv']
print('Here using gt f0!!')
if hparams.get('use_midi') is not None and hparams['use_midi']:
outputs = self.model(
txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=ref_mels, infer=True,
pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
else:
outputs = self.model(
txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=ref_mels, infer=True)
sample['outputs'] = self.model.out2mel(outputs['mel_out'])
sample['mel2ph_pred'] = outputs['mel2ph']
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel
sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel
else:
sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams)
sample['f0_pred'] = outputs.get('f0_denorm')
return self.after_infer(sample)
def after_infer(self, predictions):
if self.saving_result_pool is None and not hparams['profile_infer']:
self.saving_result_pool = Pool(min(int(os.getenv('N_PROC', os.cpu_count())), 16))
self.saving_results_futures = []
predictions = utils.unpack_dict_to_list(predictions)
t = tqdm(predictions)
for num_predictions, prediction in enumerate(t):
for k, v in prediction.items():
if type(v) is torch.Tensor:
prediction[k] = v.cpu().numpy()
item_name = prediction.get('item_name')
text = prediction.get('text').replace(":", "%3A")[:80]
# remove paddings
mel_gt = prediction["mels"]
mel_gt_mask = np.abs(mel_gt).sum(-1) > 0
mel_gt = mel_gt[mel_gt_mask]
mel2ph_gt = prediction.get("mel2ph")
mel2ph_gt = mel2ph_gt[mel_gt_mask] if mel2ph_gt is not None else None
mel_pred = prediction["outputs"]
mel_pred_mask = np.abs(mel_pred).sum(-1) > 0
mel_pred = mel_pred[mel_pred_mask]
mel_gt = np.clip(mel_gt, hparams['mel_vmin'], hparams['mel_vmax'])
mel_pred = np.clip(mel_pred, hparams['mel_vmin'], hparams['mel_vmax'])
mel2ph_pred = prediction.get("mel2ph_pred")
if mel2ph_pred is not None:
if len(mel2ph_pred) > len(mel_pred_mask):
mel2ph_pred = mel2ph_pred[:len(mel_pred_mask)]
mel2ph_pred = mel2ph_pred[mel_pred_mask]
f0_gt = prediction.get("f0")
f0_pred = prediction.get("f0_pred")
if f0_pred is not None:
f0_gt = f0_gt[mel_gt_mask]
if len(f0_pred) > len(mel_pred_mask):
f0_pred = f0_pred[:len(mel_pred_mask)]
f0_pred = f0_pred[mel_pred_mask]
str_phs = None
if self.phone_encoder is not None and 'txt_tokens' in prediction:
str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True)
gen_dir = os.path.join(hparams['work_dir'],
f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred)
if not hparams['profile_infer']:
os.makedirs(gen_dir, exist_ok=True)
os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
os.makedirs(f'{gen_dir}/plot', exist_ok=True)
os.makedirs(os.path.join(hparams['work_dir'], 'P_mels_npy'), exist_ok=True)
os.makedirs(os.path.join(hparams['work_dir'], 'G_mels_npy'), exist_ok=True)
self.saving_results_futures.append(
self.saving_result_pool.apply_async(self.save_result, args=[
wav_pred, mel_pred, 'P', item_name, text, gen_dir, str_phs, mel2ph_pred, f0_gt, f0_pred]))
if mel_gt is not None and hparams['save_gt']:
wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
self.saving_results_futures.append(
self.saving_result_pool.apply_async(self.save_result, args=[
wav_gt, mel_gt, 'G', item_name, text, gen_dir, str_phs, mel2ph_gt, f0_gt, f0_pred]))
if hparams['save_f0']:
import matplotlib.pyplot as plt
# f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
f0_pred_ = f0_pred
f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
fig = plt.figure()
plt.plot(f0_pred_, label=r'$f0_P$')
plt.plot(f0_gt_, label=r'$f0_G$')
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
# f0_midi = prediction.get("f0_midi")
# f0_midi = f0_midi[mel_gt_mask]
# plt.plot(f0_midi, label=r'$f0_M$')
pass
plt.legend()
plt.tight_layout()
plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png')
plt.close(fig)
t.set_description(
f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
else:
if 'gen_wav_time' not in self.stats:
self.stats['gen_wav_time'] = 0
self.stats['gen_wav_time'] += len(wav_pred) / hparams['audio_sample_rate']
print('gen_wav_time: ', self.stats['gen_wav_time'])
return {}
@staticmethod
def save_result(wav_out, mel, prefix, item_name, text, gen_dir, str_phs=None, mel2ph=None, gt_f0=None, pred_f0=None):
item_name = item_name.replace('/', '-')
base_fn = f'[{item_name}][{prefix}]'
if text is not None:
base_fn += text
base_fn += ('-' + hparams['exp_name'])
np.save(os.path.join(hparams['work_dir'], f'{prefix}_mels_npy', item_name), mel)
audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'],
norm=hparams['out_wav_norm'])
fig = plt.figure(figsize=(14, 10))
spec_vmin = hparams['mel_vmin']
spec_vmax = hparams['mel_vmax']
heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax)
fig.colorbar(heatmap)
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
gt_f0 = (gt_f0 - 100) / (800 - 100) * 80 * (gt_f0 > 0)
pred_f0 = (pred_f0 - 100) / (800 - 100) * 80 * (pred_f0 > 0)
plt.plot(pred_f0, c='white', linewidth=1, alpha=0.6)
plt.plot(gt_f0, c='red', linewidth=1, alpha=0.6)
else:
f0, _ = get_pitch(wav_out, mel, hparams)
f0 = (f0 - 100) / (800 - 100) * 80 * (f0 > 0)
plt.plot(f0, c='white', linewidth=1, alpha=0.6)
if mel2ph is not None and str_phs is not None:
decoded_txt = str_phs.split(" ")
dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy()
dur = [0] + list(np.cumsum(dur))
for i in range(len(dur) - 1):
shift = (i % 20) + 1
plt.text(dur[i], shift, decoded_txt[i])
plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black')
plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black',
alpha=1, linewidth=1)
plt.tight_layout()
plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png', dpi=1000)
plt.close(fig)
##############
# utils
##############
@staticmethod
def expand_f0_ph(f0, mel2ph):
f0 = denorm_f0(f0, None, hparams)
f0 = F.pad(f0, [1, 0])
f0 = torch.gather(f0, 1, mel2ph) # [B, T_mel]
return f0
if __name__ == '__main__':
FastSpeech2Task.start()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/fs2.py |
import torch
from modules.portaspeech.portaspeech_flow import PortaSpeechFlow
from tasks.tts.fs2 import FastSpeech2Task
from tasks.tts.ps import PortaSpeechTask
from utils.pitch_utils import denorm_f0
from utils.hparams import hparams
class PortaSpeechFlowTask(PortaSpeechTask):
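    """PortaSpeech variant with a flow-based (glow) post-net.

    With hparams['two_stage'], optimizer 0 trains the main model until
    post_glow_training_start; after that only the post-glow is updated (mel losses
    are detached and the 'postflow' term is optimized). The KL term is detached
    before kl_start_steps, then clamped at kl_min and scaled by lambda_kl.
    """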
def __init__(self):
super().__init__()
self.training_post_glow = False
def build_tts_model(self):
ph_dict_size = len(self.token_encoder)
word_dict_size = len(self.word_encoder)
self.model = PortaSpeechFlow(ph_dict_size, word_dict_size, hparams)
def _training_step(self, sample, batch_idx, opt_idx):
self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \
and hparams['use_post_flow']
if hparams['two_stage'] and \
((opt_idx == 0 and self.training_post_glow) or (opt_idx == 1 and not self.training_post_glow)):
return None
loss_output, _ = self.run_model(sample)
total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad])
loss_output['batch_size'] = sample['txt_tokens'].size()[0]
if 'postflow' in loss_output and loss_output['postflow'] is None:
return None
return total_loss, loss_output
def run_model(self, sample, infer=False, *args, **kwargs):
if not infer:
training_post_glow = self.training_post_glow
spk_embed = sample.get('spk_embed')
spk_id = sample.get('spk_ids')
output = self.model(sample['txt_tokens'],
sample['word_tokens'],
ph2word=sample['ph2word'],
mel2word=sample['mel2word'],
mel2ph=sample['mel2ph'],
word_len=sample['word_lengths'].max(),
tgt_mels=sample['mels'],
pitch=sample.get('pitch'),
spk_embed=spk_embed,
spk_id=spk_id,
infer=False,
forward_post_glow=training_post_glow,
two_stage=hparams['two_stage'],
global_step=self.global_step,
bert_feats=sample.get('bert_feats'))
losses = {}
self.add_mel_loss(output['mel_out'], sample['mels'], losses)
if (training_post_glow or not hparams['two_stage']) and hparams['use_post_flow']:
losses['postflow'] = output['postflow']
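                # while the post-flow is being optimized, keep the reconstruction losses detached
                # so they are still logged but not back-propagated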
losses['l1'] = losses['l1'].detach()
losses['ssim'] = losses['ssim'].detach()
if not training_post_glow or not hparams['two_stage'] or not self.training:
losses['kl'] = output['kl']
if self.global_step < hparams['kl_start_steps']:
losses['kl'] = losses['kl'].detach()
else:
losses['kl'] = torch.clamp(losses['kl'], min=hparams['kl_min'])
losses['kl'] = losses['kl'] * hparams['lambda_kl']
if hparams['dur_level'] == 'word':
self.add_dur_loss(
output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses)
self.get_attn_stats(output['attn'], sample, losses)
else:
super().add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses)
return losses, output
else:
use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
forward_post_glow = self.global_step >= hparams['post_glow_training_start'] + 1000 \
and hparams['use_post_flow']
spk_embed = sample.get('spk_embed')
spk_id = sample.get('spk_ids')
output = self.model(
sample['txt_tokens'],
sample['word_tokens'],
ph2word=sample['ph2word'],
word_len=sample['word_lengths'].max(),
pitch=sample.get('pitch'),
mel2ph=sample['mel2ph'] if use_gt_dur else None,
mel2word=sample['mel2word'] if hparams['profile_infer'] or hparams['use_gt_dur'] else None,
infer=True,
forward_post_glow=forward_post_glow,
spk_embed=spk_embed,
spk_id=spk_id,
two_stage=hparams['two_stage'],
bert_feats=sample.get('bert_feats'))
return output
def validation_step(self, sample, batch_idx):
self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \
and hparams['use_post_flow']
return super().validation_step(sample, batch_idx)
def save_valid_result(self, sample, batch_idx, model_out):
super(PortaSpeechFlowTask, self).save_valid_result(sample, batch_idx, model_out)
sr = hparams['audio_sample_rate']
f0_gt = None
if sample.get('f0') is not None:
f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu())
if self.global_step > 0:
# save FVAE result
if hparams['use_post_flow']:
wav_pred = self.vocoder.spec2wav(model_out['mel_out_fvae'][0].cpu(), f0=f0_gt)
self.logger.add_audio(f'wav_fvae_{batch_idx}', wav_pred, self.global_step, sr)
self.plot_mel(batch_idx, sample['mels'], model_out['mel_out_fvae'][0],
f'mel_fvae_{batch_idx}', f0s=f0_gt)
def build_optimizer(self, model):
if hparams['two_stage'] and hparams['use_post_flow']:
self.optimizer = torch.optim.AdamW(
[p for name, p in self.model.named_parameters() if 'post_flow' not in name],
lr=hparams['lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
self.post_flow_optimizer = torch.optim.AdamW(
self.model.post_flow.parameters(),
lr=hparams['post_flow_lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
return [self.optimizer, self.post_flow_optimizer]
else:
self.optimizer = torch.optim.AdamW(
self.model.parameters(),
lr=hparams['lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
return [self.optimizer]
def build_scheduler(self, optimizer):
return FastSpeech2Task.build_scheduler(self, optimizer[0]) | EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/ps_flow.py |
import matplotlib
matplotlib.use('Agg')
import glob
import importlib
from utils.cwt import get_lf0_cwt
import os
import torch.optim
import torch.utils.data
from utils.indexed_datasets import IndexedDataset
from utils.pitch_utils import norm_interp_f0
import numpy as np
from tasks.base_task import BaseDataset
import torch
import torch.optim
import torch.utils.data
import utils
import torch.distributions
from utils.hparams import hparams
class FastSpeechDataset(BaseDataset):
def __init__(self, prefix, shuffle=False):
super().__init__(shuffle)
self.data_dir = hparams['binary_data_dir']
self.prefix = prefix
self.hparams = hparams
self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy')
self.indexed_ds = None
# self.name2spk_id={}
# pitch stats
f0_stats_fn = f'{self.data_dir}/train_f0s_mean_std.npy'
if os.path.exists(f0_stats_fn):
hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = np.load(f0_stats_fn)
hparams['f0_mean'] = float(hparams['f0_mean'])
hparams['f0_std'] = float(hparams['f0_std'])
else:
hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = None, None
if prefix == 'test':
if hparams['test_input_dir'] != '':
self.indexed_ds, self.sizes = self.load_test_inputs(hparams['test_input_dir'])
else:
if hparams['num_test_samples'] > 0:
self.avail_idxs = list(range(hparams['num_test_samples'])) + hparams['test_ids']
self.sizes = [self.sizes[i] for i in self.avail_idxs]
if hparams['pitch_type'] == 'cwt':
_, hparams['cwt_scales'] = get_lf0_cwt(np.ones(10))
def _get_item(self, index):
if hasattr(self, 'avail_idxs') and self.avail_idxs is not None:
index = self.avail_idxs[index]
if self.indexed_ds is None:
self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}')
return self.indexed_ds[index]
def __getitem__(self, index):
hparams = self.hparams
item = self._get_item(index)
max_frames = hparams['max_frames']
spec = torch.Tensor(item['mel'])[:max_frames]
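        # frame-level energy: L2 norm of each exponentiated mel frame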
energy = (spec.exp() ** 2).sum(-1).sqrt()
mel2ph = torch.LongTensor(item['mel2ph'])[:max_frames] if 'mel2ph' in item else None
f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams)
phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']])
pitch = torch.LongTensor(item.get("pitch"))[:max_frames]
# print(item.keys(), item['mel'].shape, spec.shape)
sample = {
"id": index,
"item_name": item['item_name'],
"text": item['txt'],
"txt_token": phone,
"mel": spec,
"pitch": pitch,
"energy": energy,
"f0": f0,
"uv": uv,
"mel2ph": mel2ph,
"mel_nonpadding": spec.abs().sum(-1) > 0,
}
if self.hparams['use_spk_embed']:
sample["spk_embed"] = torch.Tensor(item['spk_embed'])
if self.hparams['use_spk_id']:
sample["spk_id"] = item['spk_id']
# sample['spk_id'] = 0
# for key in self.name2spk_id.keys():
# if key in item['item_name']:
# sample['spk_id'] = self.name2spk_id[key]
# break
if self.hparams['pitch_type'] == 'cwt':
cwt_spec = torch.Tensor(item['cwt_spec'])[:max_frames]
f0_mean = item.get('f0_mean', item.get('cwt_mean'))
f0_std = item.get('f0_std', item.get('cwt_std'))
sample.update({"cwt_spec": cwt_spec, "f0_mean": f0_mean, "f0_std": f0_std})
elif self.hparams['pitch_type'] == 'ph':
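            # phoneme-level f0: average frame-level f0 over the frames assigned to each phoneme (mel2ph is 1-based)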
f0_phlevel_sum = torch.zeros_like(phone).float().scatter_add(0, mel2ph - 1, f0)
f0_phlevel_num = torch.zeros_like(phone).float().scatter_add(
0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1)
sample["f0_ph"] = f0_phlevel_sum / f0_phlevel_num
return sample
def collater(self, samples):
if len(samples) == 0:
return {}
id = torch.LongTensor([s['id'] for s in samples])
item_names = [s['item_name'] for s in samples]
text = [s['text'] for s in samples]
txt_tokens = utils.collate_1d([s['txt_token'] for s in samples], 0)
f0 = utils.collate_1d([s['f0'] for s in samples], 0.0)
pitch = utils.collate_1d([s['pitch'] for s in samples])
uv = utils.collate_1d([s['uv'] for s in samples])
energy = utils.collate_1d([s['energy'] for s in samples], 0.0)
mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \
if samples[0]['mel2ph'] is not None else None
mels = utils.collate_2d([s['mel'] for s in samples], 0.0)
txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples])
mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples])
batch = {
'id': id,
'item_name': item_names,
'nsamples': len(samples),
'text': text,
'txt_tokens': txt_tokens,
'txt_lengths': txt_lengths,
'mels': mels,
'mel_lengths': mel_lengths,
'mel2ph': mel2ph,
'energy': energy,
'pitch': pitch,
'f0': f0,
'uv': uv,
}
if self.hparams['use_spk_embed']:
spk_embed = torch.stack([s['spk_embed'] for s in samples])
batch['spk_embed'] = spk_embed
if self.hparams['use_spk_id']:
spk_ids = torch.LongTensor([s['spk_id'] for s in samples])
batch['spk_ids'] = spk_ids
if self.hparams['pitch_type'] == 'cwt':
cwt_spec = utils.collate_2d([s['cwt_spec'] for s in samples])
f0_mean = torch.Tensor([s['f0_mean'] for s in samples])
f0_std = torch.Tensor([s['f0_std'] for s in samples])
batch.update({'cwt_spec': cwt_spec, 'f0_mean': f0_mean, 'f0_std': f0_std})
elif self.hparams['pitch_type'] == 'ph':
batch['f0'] = utils.collate_1d([s['f0_ph'] for s in samples])
return batch
def load_test_inputs(self, test_input_dir, spk_id=0):
inp_wav_paths = glob.glob(f'{test_input_dir}/*.wav') + glob.glob(f'{test_input_dir}/*.mp3')
sizes = []
items = []
        binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
pkg = ".".join(binarizer_cls.split(".")[:-1])
cls_name = binarizer_cls.split(".")[-1]
binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
binarization_args = hparams['binarization_args']
for wav_fn in inp_wav_paths:
item_name = os.path.basename(wav_fn)
ph = txt = tg_fn = ''
wav_fn = wav_fn
encoder = None
item = binarizer_cls.process_item(item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args)
items.append(item)
sizes.append(item['len'])
return items, sizes
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/fs2_utils.py |
import os
import torch
import torch.nn.functional as F
from torch import nn
from modules.tts.syntaspeech.syntaspeech import SyntaSpeech
from tasks.tts.ps_adv import PortaSpeechAdvTask
from utils.hparams import hparams
class SyntaSpeechTask(PortaSpeechAdvTask):
def build_tts_model(self):
ph_dict_size = len(self.token_encoder)
word_dict_size = len(self.word_encoder)
self.model = SyntaSpeech(ph_dict_size, word_dict_size, hparams)
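        # group parameters (duration predictor / BERT / remainder) so the adversarial parent task
        # can optimize them separately, e.g. with different optimizers or learning rates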
self.gen_params = [p for p in self.model.parameters() if p.requires_grad]
self.dp_params = [p for k, p in self.model.named_parameters() if (('dur_predictor' in k) and p.requires_grad)]
self.gen_params_except_dp = [p for k, p in self.model.named_parameters() if (('dur_predictor' not in k) and p.requires_grad)]
self.bert_params = [p for k, p in self.model.named_parameters() if (('bert' in k) and p.requires_grad)]
self.gen_params_except_bert_and_dp = [p for k, p in self.model.named_parameters() if ('dur_predictor' not in k) and ('bert' not in k) and p.requires_grad ]
self.use_bert = True if len(self.bert_params) > 0 else False
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/synta.py |
import os
import torch
import torch.nn.functional as F
from torch import nn
from modules.portaspeech.portaspeech import PortaSpeech
from tasks.tts.fs2 import FastSpeech2Task
from utils.tts_utils import mel2token_to_dur
from utils.hparams import hparams
from utils.tts_utils import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate
from utils import num_params
import numpy as np
from utils.plot import spec_to_figure
from data_gen.tts.data_gen_utils import build_token_encoder
class PortaSpeechTask(FastSpeech2Task):
def __init__(self):
super().__init__()
data_dir = hparams['binary_data_dir']
self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json')
def build_tts_model(self):
ph_dict_size = len(self.token_encoder)
word_dict_size = len(self.word_encoder)
self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams)
def on_train_start(self):
super().on_train_start()
for n, m in self.model.named_children():
num_params(m, model_name=n)
if hasattr(self.model, 'fvae'):
for n, m in self.model.fvae.named_children():
num_params(m, model_name=f'fvae.{n}')
def run_model(self, sample, infer=False, *args, **kwargs):
txt_tokens = sample['txt_tokens']
word_tokens = sample['word_tokens']
spk_embed = sample.get('spk_embed')
spk_id = sample.get('spk_ids')
if not infer:
output = self.model(txt_tokens, word_tokens,
ph2word=sample['ph2word'],
mel2word=sample['mel2word'],
mel2ph=sample['mel2ph'],
word_len=sample['word_lengths'].max(),
tgt_mels=sample['mels'],
pitch=sample.get('pitch'),
spk_embed=spk_embed,
spk_id=spk_id,
infer=False,
global_step=self.global_step)
losses = {}
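            # the raw KL is logged as `kl_v`; the optimized KL is clamped to `kl_min`, its weight is
            # linearly ramped up until `kl_start_steps`, then scaled by `lambda_kl`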
losses['kl_v'] = output['kl'].detach()
losses_kl = output['kl']
losses_kl = torch.clamp(losses_kl, min=hparams['kl_min'])
losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl
losses_kl = losses_kl * hparams['lambda_kl']
losses['kl'] = losses_kl
self.add_mel_loss(output['mel_out'], sample['mels'], losses)
if hparams['dur_level'] == 'word':
self.add_dur_loss(
output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses)
self.get_attn_stats(output['attn'], sample, losses)
else:
super(PortaSpeechTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses)
return losses, output
else:
use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
output = self.model(
txt_tokens, word_tokens,
ph2word=sample['ph2word'],
word_len=sample['word_lengths'].max(),
pitch=sample.get('pitch'),
mel2ph=sample['mel2ph'] if use_gt_dur else None,
mel2word=sample['mel2word'] if use_gt_dur else None,
tgt_mels=sample['mels'],
infer=True,
spk_embed=spk_embed,
spk_id=spk_id,
)
return output
def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None):
T = word_len.max()
dur_gt = mel2token_to_dur(mel2token, T).float()
nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float()
dur_pred = dur_pred * nonpadding
dur_gt = dur_gt * nonpadding
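        # word-level duration loss in the log domain; the +1 offset avoids log(0)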
wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none')
wdur = (wdur * nonpadding).sum() / nonpadding.sum()
if hparams['lambda_word_dur'] > 0:
losses['wdur'] = wdur * hparams['lambda_word_dur']
if hparams['lambda_sent_dur'] > 0:
sent_dur_p = dur_pred.sum(-1)
sent_dur_g = dur_gt.sum(-1)
sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean')
losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
def validation_step(self, sample, batch_idx):
return super().validation_step(sample, batch_idx)
def save_valid_result(self, sample, batch_idx, model_out):
super(PortaSpeechTask, self).save_valid_result(sample, batch_idx, model_out)
if self.global_step > 0 and hparams['dur_level'] == 'word':
self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step)
def get_attn_stats(self, attn, sample, logging_outputs, prefix=''):
# diagonal_focus_rate
txt_lengths = sample['txt_lengths'].float()
mel_lengths = sample['mel_lengths'].float()
src_padding_mask = sample['txt_tokens'].eq(0)
target_padding_mask = sample['mels'].abs().sum(-1).eq(0)
src_seg_mask = sample['txt_tokens'].eq(self.seg_idx)
attn_ks = txt_lengths.float() / mel_lengths.float()
focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data
phone_coverage_rate = get_phone_coverage_rate(
attn, src_padding_mask, src_seg_mask, target_padding_mask).mean()
diagonal_focus_rate, diag_mask = get_diagonal_focus_rate(
attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask)
logging_outputs[f'{prefix}fr'] = focus_rate.mean().data
logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data
logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data
def get_plot_dur_info(self, sample, model_out):
if hparams['dur_level'] == 'word':
T_txt = sample['word_lengths'].max()
dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0]
dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
txt = sample['ph_words'][0].split(" ")
else:
T_txt = sample['txt_tokens'].shape[1]
dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
txt = txt.split(" ")
return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}
def build_optimizer(self, model):
self.optimizer = torch.optim.AdamW(
self.model.parameters(),
lr=hparams['lr'],
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
weight_decay=hparams['weight_decay'])
return self.optimizer
def build_scheduler(self, optimizer):
        return FastSpeech2Task.build_scheduler(self, optimizer)
############
# infer
############
def test_start(self):
super().test_start()
if hparams.get('save_attn', False):
os.makedirs(f'{self.gen_dir}/attn', exist_ok=True)
self.model.store_inverse_all()
def test_step(self, sample, batch_idx):
assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
outputs = self.run_model(sample, infer=True)
text = sample['text'][0]
item_name = sample['item_name'][0]
tokens = sample['txt_tokens'][0].cpu().numpy()
mel_gt = sample['mels'][0].cpu().numpy()
mel_pred = outputs['mel_out'][0].cpu().numpy()
mel2ph = sample['mel2ph'][0].cpu().numpy()
mel2ph_pred = None
str_phs = self.token_encoder.decode(tokens, strip_padding=True)
base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
if text is not None:
base_fn += text.replace(":", "$3A")[:80]
base_fn = base_fn.replace(' ', '_')
gen_dir = self.gen_dir
wav_pred = self.vocoder.spec2wav(mel_pred)
self.saving_result_pool.add_job(self.save_result, args=[
wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
if hparams['save_gt']:
wav_gt = self.vocoder.spec2wav(mel_gt)
self.saving_result_pool.add_job(self.save_result, args=[
wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
if hparams.get('save_attn', False):
attn = outputs['attn'][0].cpu().numpy()
np.save(f'{gen_dir}/attn/{item_name}.npy', attn)
print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
return {
'item_name': item_name,
'text': text,
'ph_tokens': self.token_encoder.decode(tokens.tolist()),
'wav_fn_pred': base_fn % 'P',
'wav_fn_gt': base_fn % 'G',
}
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/ps.py |
import matplotlib
matplotlib.use('Agg')
import torch
import numpy as np
import os
from tasks.base_task import BaseDataset
from tasks.tts.fs2 import FastSpeech2Task
from modules.fastspeech.pe import PitchExtractor
import utils
from utils.indexed_datasets import IndexedDataset
from utils.hparams import hparams
from utils.plot import f0_to_figure
from utils.pitch_utils import norm_interp_f0, denorm_f0
class PeDataset(BaseDataset):
def __init__(self, prefix, shuffle=False):
super().__init__(shuffle)
self.data_dir = hparams['binary_data_dir']
self.prefix = prefix
self.hparams = hparams
self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy')
self.indexed_ds = None
# pitch stats
f0_stats_fn = f'{self.data_dir}/train_f0s_mean_std.npy'
if os.path.exists(f0_stats_fn):
hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = np.load(f0_stats_fn)
hparams['f0_mean'] = float(hparams['f0_mean'])
hparams['f0_std'] = float(hparams['f0_std'])
else:
hparams['f0_mean'], hparams['f0_std'] = self.f0_mean, self.f0_std = None, None
if prefix == 'test':
if hparams['num_test_samples'] > 0:
self.avail_idxs = list(range(hparams['num_test_samples'])) + hparams['test_ids']
self.sizes = [self.sizes[i] for i in self.avail_idxs]
def _get_item(self, index):
if hasattr(self, 'avail_idxs') and self.avail_idxs is not None:
index = self.avail_idxs[index]
if self.indexed_ds is None:
self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}')
return self.indexed_ds[index]
def __getitem__(self, index):
hparams = self.hparams
item = self._get_item(index)
max_frames = hparams['max_frames']
spec = torch.Tensor(item['mel'])[:max_frames]
# mel2ph = torch.LongTensor(item['mel2ph'])[:max_frames] if 'mel2ph' in item else None
f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams)
pitch = torch.LongTensor(item.get("pitch"))[:max_frames]
# print(item.keys(), item['mel'].shape, spec.shape)
sample = {
"id": index,
"item_name": item['item_name'],
"text": item['txt'],
"mel": spec,
"pitch": pitch,
"f0": f0,
"uv": uv,
# "mel2ph": mel2ph,
# "mel_nonpadding": spec.abs().sum(-1) > 0,
}
return sample
def collater(self, samples):
if len(samples) == 0:
return {}
id = torch.LongTensor([s['id'] for s in samples])
item_names = [s['item_name'] for s in samples]
text = [s['text'] for s in samples]
f0 = utils.collate_1d([s['f0'] for s in samples], 0.0)
pitch = utils.collate_1d([s['pitch'] for s in samples])
uv = utils.collate_1d([s['uv'] for s in samples])
mels = utils.collate_2d([s['mel'] for s in samples], 0.0)
mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples])
# mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \
# if samples[0]['mel2ph'] is not None else None
# mel_nonpaddings = utils.collate_1d([s['mel_nonpadding'].float() for s in samples], 0.0)
batch = {
'id': id,
'item_name': item_names,
'nsamples': len(samples),
'text': text,
'mels': mels,
'mel_lengths': mel_lengths,
'pitch': pitch,
# 'mel2ph': mel2ph,
# 'mel_nonpaddings': mel_nonpaddings,
'f0': f0,
'uv': uv,
}
return batch
class PitchExtractionTask(FastSpeech2Task):
def __init__(self):
super().__init__()
self.dataset_cls = PeDataset
def build_tts_model(self):
self.model = PitchExtractor(conv_layers=hparams['pitch_extractor_conv_layers'])
# def build_scheduler(self, optimizer):
# return torch.optim.lr_scheduler.StepLR(optimizer, hparams['decay_steps'], gamma=0.5)
def _training_step(self, sample, batch_idx, _):
loss_output = self.run_model(self.model, sample)
total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad])
loss_output['batch_size'] = sample['mels'].size()[0]
return total_loss, loss_output
def validation_step(self, sample, batch_idx):
outputs = {}
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=True)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
outputs = utils.tensors_to_scalars(outputs)
if batch_idx < hparams['num_valid_plots']:
self.plot_pitch(batch_idx, model_out, sample)
return outputs
def run_model(self, model, sample, return_output=False, infer=False):
f0 = sample['f0']
uv = sample['uv']
output = model(sample['mels'])
losses = {}
self.add_pitch_loss(output, sample, losses)
if not return_output:
return losses
else:
return losses, output
def plot_pitch(self, batch_idx, model_out, sample):
gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
self.logger.experiment.add_figure(
f'f0_{batch_idx}',
f0_to_figure(gt_f0[0], None, model_out['f0_denorm_pred'][0]),
self.global_step)
def add_pitch_loss(self, output, sample, losses):
# mel2ph = sample['mel2ph'] # [B, T_s]
mel = sample['mels']
f0 = sample['f0']
uv = sample['uv']
# nonpadding = (mel2ph != 0).float() if hparams['pitch_type'] == 'frame' \
# else (sample['txt_tokens'] != 0).float()
nonpadding = (mel.abs().sum(-1) > 0).float() # sample['mel_nonpaddings']
# print(nonpadding[0][-8:], nonpadding.shape)
self.add_f0_loss(output['pitch_pred'], f0, uv, losses, nonpadding=nonpadding) | EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/tts/pe.py |
import glob
import importlib
import os
from resemblyzer import VoiceEncoder
import numpy as np
import torch
import torch.distributed as dist
from torch.utils.data import DistributedSampler
import utils
from tasks.base_task import BaseDataset
from utils.hparams import hparams
from utils.indexed_datasets import IndexedDataset
from tqdm import tqdm
class EndlessDistributedSampler(DistributedSampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.shuffle = shuffle
g = torch.Generator()
g.manual_seed(self.epoch)
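        # pre-build 1000 epochs worth of indices (shuffled or not) so the sampler is effectively endless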
if self.shuffle:
indices = [i for _ in range(1000) for i in torch.randperm(
len(self.dataset), generator=g).tolist()]
else:
indices = [i for _ in range(1000) for i in list(range(len(self.dataset)))]
indices = indices[:len(indices) // self.num_replicas * self.num_replicas]
indices = indices[self.rank::self.num_replicas]
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
class VocoderDataset(BaseDataset):
def __init__(self, prefix, shuffle=False):
super().__init__(shuffle)
self.hparams = hparams
self.prefix = prefix
self.data_dir = hparams['binary_data_dir']
self.is_infer = prefix == 'test'
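        # training items are cropped to `max_samples` audio samples (expressed in mel frames); 0 disables cropping at inference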
self.batch_max_frames = 0 if self.is_infer else hparams['max_samples'] // hparams['hop_size']
self.aux_context_window = hparams['aux_context_window']
self.hop_size = hparams['hop_size']
if self.is_infer and hparams['test_input_dir'] != '':
self.indexed_ds, self.sizes = self.load_test_inputs(hparams['test_input_dir'])
self.avail_idxs = [i for i, _ in enumerate(self.sizes)]
elif self.is_infer and hparams['test_mel_dir'] != '':
self.indexed_ds, self.sizes = self.load_mel_inputs(hparams['test_mel_dir'])
self.avail_idxs = [i for i, _ in enumerate(self.sizes)]
else:
self.indexed_ds = None
self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy')
self.avail_idxs = [idx for idx, s in enumerate(self.sizes) if
s - 2 * self.aux_context_window > self.batch_max_frames]
print(f"| {len(self.sizes) - len(self.avail_idxs)} short items are skipped in {prefix} set.")
self.sizes = [s for idx, s in enumerate(self.sizes) if
s - 2 * self.aux_context_window > self.batch_max_frames]
def _get_item(self, index):
if self.indexed_ds is None:
self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}')
item = self.indexed_ds[index]
return item
def __getitem__(self, index):
index = self.avail_idxs[index]
item = self._get_item(index)
sample = {
"id": index,
"item_name": item['item_name'],
"mel": torch.FloatTensor(item['mel']),
"wav": torch.FloatTensor(item['wav'].astype(np.float32)),
}
if 'pitch' in item:
sample['pitch'] = torch.LongTensor(item['pitch'])
sample['f0'] = torch.FloatTensor(item['f0'])
if hparams.get('use_spk_embed', False):
sample["spk_embed"] = torch.Tensor(item['spk_embed'])
if hparams.get('use_emo_embed', False):
sample["emo_embed"] = torch.Tensor(item['emo_embed'])
return sample
def collater(self, batch):
if len(batch) == 0:
return {}
y_batch, c_batch, p_batch, f0_batch = [], [], [], []
item_name = []
have_pitch = 'pitch' in batch[0]
for idx in range(len(batch)):
item_name.append(batch[idx]['item_name'])
x, c = batch[idx]['wav'] if self.hparams['use_wav'] else None, batch[idx]['mel'].squeeze(0)
if have_pitch:
p = batch[idx]['pitch']
f0 = batch[idx]['f0']
            if self.hparams['use_wav']:
                self._assert_ready_for_upsampling(x, c, self.hop_size, 0)
if len(c) - 2 * self.aux_context_window > self.batch_max_frames:
                # randomly pick a segment of batch_max_steps length
batch_max_frames = self.batch_max_frames if self.batch_max_frames != 0 else len(
c) - 2 * self.aux_context_window - 1
batch_max_steps = batch_max_frames * self.hop_size
interval_start = self.aux_context_window
interval_end = len(c) - batch_max_frames - self.aux_context_window
start_frame = np.random.randint(interval_start, interval_end)
start_step = start_frame * self.hop_size
                if self.hparams['use_wav']:
                    y = x[start_step: start_step + batch_max_steps]
c = c[start_frame - self.aux_context_window:
start_frame + self.aux_context_window + batch_max_frames]
if have_pitch:
p = p[start_frame - self.aux_context_window:
start_frame + self.aux_context_window + batch_max_frames]
f0 = f0[start_frame - self.aux_context_window:
start_frame + self.aux_context_window + batch_max_frames]
                if self.hparams['use_wav']:
                    self._assert_ready_for_upsampling(y, c, self.hop_size, self.aux_context_window)
else:
print(f"Removed short sample from batch (length={len(x)}).")
continue
            if self.hparams['use_wav']:
                y_batch += [y.reshape(-1, 1)]  # [(T, 1), (T, 1), ...]
c_batch += [c] # [(T' C), (T' C), ...]
if have_pitch:
p_batch += [p] # [(T' C), (T' C), ...]
f0_batch += [f0] # [(T' C), (T' C), ...]
        # convert each batch to a tensor, assuming every item in the batch has the same length
        if self.hparams['use_wav']:
            y_batch = utils.collate_2d(y_batch, 0).transpose(2, 1)  # (B, 1, T)
c_batch = utils.collate_2d(c_batch, 0).transpose(2, 1) # (B, C, T')
if have_pitch:
p_batch = utils.collate_1d(p_batch, 0) # (B, T')
f0_batch = utils.collate_1d(f0_batch, 0) # (B, T')
else:
p_batch, f0_batch = None, None
# make input noise signal batch tensor
        if self.hparams['use_wav']:
            z_batch = torch.randn(y_batch.size())  # (B, 1, T)
        else:
            z_batch = []
return {
'z': z_batch,
'mels': c_batch,
'wavs': y_batch,
'pitches': p_batch,
'f0': f0_batch,
'item_name': item_name
}
@staticmethod
def _assert_ready_for_upsampling(x, c, hop_size, context_window):
"""Assert the audio and feature lengths are correctly adjusted for upsamping."""
assert len(x) == (len(c) - 2 * context_window) * hop_size
def load_test_inputs(self, test_input_dir, spk_id=0):
inp_wav_paths = sorted(glob.glob(f'{test_input_dir}/*.wav') + glob.glob(f'{test_input_dir}/**/*.mp3'))
sizes = []
items = []
binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
pkg = ".".join(binarizer_cls.split(".")[:-1])
cls_name = binarizer_cls.split(".")[-1]
binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
binarization_args = hparams['binarization_args']
for wav_fn in inp_wav_paths:
item_name = wav_fn[len(test_input_dir) + 1:].replace("/", "_")
item = binarizer_cls.process_item(
item_name, wav_fn, binarization_args)
items.append(item)
sizes.append(item['len'])
return items, sizes
def load_mel_inputs(self, test_input_dir, spk_id=0):
inp_mel_paths = sorted(glob.glob(f'{test_input_dir}/*.npy'))
sizes = []
items = []
binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
pkg = ".".join(binarizer_cls.split(".")[:-1])
cls_name = binarizer_cls.split(".")[-1]
binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
binarization_args = hparams['binarization_args']
for mel in inp_mel_paths:
mel_input = np.load(mel)
mel_input = torch.FloatTensor(mel_input)
item_name = mel[len(test_input_dir) + 1:].replace("/", "_")
item = binarizer_cls.process_mel_item(item_name, mel_input, None, binarization_args)
items.append(item)
sizes.append(item['len'])
return items, sizes
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/vocoder/dataset_utils.py |
import os
import torch
import torch.distributed as dist
from torch.utils.data import DistributedSampler
from tasks.base_task import BaseTask
from tasks.base_task import data_loader
from tasks.vocoder.dataset_utils import VocoderDataset, EndlessDistributedSampler
from utils.hparams import hparams
class VocoderBaseTask(BaseTask):
def __init__(self):
super(VocoderBaseTask, self).__init__()
self.max_sentences = hparams['max_sentences']
self.max_valid_sentences = hparams['max_valid_sentences']
if self.max_valid_sentences == -1:
hparams['max_valid_sentences'] = self.max_valid_sentences = self.max_sentences
self.dataset_cls = VocoderDataset
@data_loader
def train_dataloader(self):
train_dataset = self.dataset_cls('train', shuffle=True)
return self.build_dataloader(train_dataset, True, self.max_sentences, hparams['endless_ds'])
@data_loader
def val_dataloader(self):
valid_dataset = self.dataset_cls('valid', shuffle=False)
return self.build_dataloader(valid_dataset, False, self.max_valid_sentences)
@data_loader
def test_dataloader(self):
test_dataset = self.dataset_cls('test', shuffle=False)
return self.build_dataloader(test_dataset, False, self.max_valid_sentences)
def build_dataloader(self, dataset, shuffle, max_sentences, endless=False):
world_size = 1
rank = 0
if dist.is_initialized():
world_size = dist.get_world_size()
rank = dist.get_rank()
sampler_cls = DistributedSampler if not endless else EndlessDistributedSampler
train_sampler = sampler_cls(
dataset=dataset,
num_replicas=world_size,
rank=rank,
shuffle=shuffle,
)
return torch.utils.data.DataLoader(
dataset=dataset,
shuffle=False,
collate_fn=dataset.collater,
batch_size=max_sentences,
num_workers=dataset.num_workers,
sampler=train_sampler,
pin_memory=True,
)
def test_start(self):
self.gen_dir = os.path.join(hparams['work_dir'],
f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
os.makedirs(self.gen_dir, exist_ok=True)
def test_end(self, outputs):
return {}
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/tasks/vocoder/vocoder_base.py |
import librosa
from utils.hparams import hparams
import numpy as np
def denoise(wav, v=0.1):
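    # simple spectral subtraction: subtract a constant `v` from the STFT magnitude and
    # resynthesize the waveform with the original phase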
spec = librosa.stft(y=wav, n_fft=hparams['fft_size'], hop_length=hparams['hop_size'],
win_length=hparams['win_size'], pad_mode='constant')
spec_m = np.abs(spec)
spec_m = np.clip(spec_m - v, a_min=0, a_max=None)
spec_a = np.angle(spec)
return librosa.istft(spec_m * np.exp(1j * spec_a), hop_length=hparams['hop_size'],
win_length=hparams['win_size'])
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/vocoders/vocoder_utils.py |
import glob
import re
import librosa
import torch
import yaml
from sklearn.preprocessing import StandardScaler
from torch import nn
from modules.parallel_wavegan.models import ParallelWaveGANGenerator
from modules.parallel_wavegan.utils import read_hdf5
from utils.hparams import hparams
from utils.pitch_utils import f0_to_coarse
from vocoders.base_vocoder import BaseVocoder, register_vocoder
import numpy as np
def load_pwg_model(config_path, checkpoint_path, stats_path):
# load config
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.Loader)
# setup
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
model = ParallelWaveGANGenerator(**config["generator_params"])
ckpt_dict = torch.load(checkpoint_path, map_location="cpu")
if 'state_dict' not in ckpt_dict: # official vocoder
model.load_state_dict(torch.load(checkpoint_path, map_location="cpu")["model"]["generator"])
scaler = StandardScaler()
if config["format"] == "hdf5":
scaler.mean_ = read_hdf5(stats_path, "mean")
scaler.scale_ = read_hdf5(stats_path, "scale")
elif config["format"] == "npy":
scaler.mean_ = np.load(stats_path)[0]
scaler.scale_ = np.load(stats_path)[1]
else:
raise ValueError("support only hdf5 or npy format.")
else: # custom PWG vocoder
fake_task = nn.Module()
fake_task.model_gen = model
fake_task.load_state_dict(torch.load(checkpoint_path, map_location="cpu")["state_dict"], strict=False)
scaler = None
model.remove_weight_norm()
model = model.eval().to(device)
print(f"| Loaded model parameters from {checkpoint_path}.")
print(f"| PWG device: {device}.")
return model, scaler, config, device
@register_vocoder
class PWG(BaseVocoder):
def __init__(self):
if hparams['vocoder_ckpt'] == '': # load LJSpeech PWG pretrained model
base_dir = 'wavegan_pretrained'
ckpts = glob.glob(f'{base_dir}/checkpoint-*steps.pkl')
ckpt = sorted(ckpts, key=
lambda x: int(re.findall(f'{base_dir}/checkpoint-(\d+)steps.pkl', x)[0]))[-1]
config_path = f'{base_dir}/config.yaml'
print('| load PWG: ', ckpt)
self.model, self.scaler, self.config, self.device = load_pwg_model(
config_path=config_path,
checkpoint_path=ckpt,
stats_path=f'{base_dir}/stats.h5',
)
else:
base_dir = hparams['vocoder_ckpt']
print(base_dir)
config_path = f'{base_dir}/config.yaml'
ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key=
lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1]
print('| load PWG: ', ckpt)
self.scaler = None
self.model, _, self.config, self.device = load_pwg_model(
config_path=config_path,
checkpoint_path=ckpt,
stats_path=f'{base_dir}/stats.h5',
)
def spec2wav(self, mel, **kwargs):
# start generation
config = self.config
device = self.device
pad_size = (config["generator_params"]["aux_context_window"],
config["generator_params"]["aux_context_window"])
c = mel
if self.scaler is not None:
c = self.scaler.transform(c)
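        # draw a noise input as long as the target waveform, pad the mel with the aux context
        # window, and optionally condition on coarse f0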
with torch.no_grad():
z = torch.randn(1, 1, c.shape[0] * config["hop_size"]).to(device)
c = np.pad(c, (pad_size, (0, 0)), "edge")
c = torch.FloatTensor(c).unsqueeze(0).transpose(2, 1).to(device)
p = kwargs.get('f0')
if p is not None:
p = f0_to_coarse(p)
p = np.pad(p, (pad_size,), "edge")
p = torch.LongTensor(p[None, :]).to(device)
y = self.model(z, c, p).view(-1)
wav_out = y.cpu().numpy()
return wav_out
@staticmethod
def wav2spec(wav_fn, return_linear=False):
from data_gen.tts.data_gen_utils import process_utterance
res = process_utterance(
wav_fn, fft_size=hparams['fft_size'],
hop_size=hparams['hop_size'],
win_length=hparams['win_size'],
num_mels=hparams['audio_num_mel_bins'],
fmin=hparams['fmin'],
fmax=hparams['fmax'],
sample_rate=hparams['audio_sample_rate'],
loud_norm=hparams['loud_norm'],
min_level_db=hparams['min_level_db'],
return_linear=return_linear, vocoder='pwg', eps=float(hparams.get('wav2spec_eps', 1e-10)))
if return_linear:
return res[0], res[1].T, res[2].T # [T, 80], [T, n_fft]
else:
return res[0], res[1].T
@staticmethod
def wav2mfcc(wav_fn):
fft_size = hparams['fft_size']
hop_size = hparams['hop_size']
win_length = hparams['win_size']
sample_rate = hparams['audio_sample_rate']
wav, _ = librosa.core.load(wav_fn, sr=sample_rate)
mfcc = librosa.feature.mfcc(y=wav, sr=sample_rate, n_mfcc=13,
n_fft=fft_size, hop_length=hop_size,
win_length=win_length, pad_mode="constant", power=1.0)
mfcc_delta = librosa.feature.delta(mfcc, order=1)
mfcc_delta_delta = librosa.feature.delta(mfcc, order=2)
mfcc = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta]).T
return mfcc
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/vocoders/pwg.py |
from vocoders import hifigan
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/vocoders/__init__.py |
import glob
import json
import os
import re
import librosa
import torch
import utils
from modules.hifigan.hifigan import HifiGanGenerator
from utils.hparams import hparams, set_hparams
from vocoders.base_vocoder import register_vocoder
from vocoders.pwg import PWG
from vocoders.vocoder_utils import denoise
def load_model(config_path, checkpoint_path):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ckpt_dict = torch.load(checkpoint_path, map_location="cpu")
if '.yaml' in config_path:
config = set_hparams(config_path, global_hparams=False)
state = ckpt_dict["state_dict"]["model_gen"]
elif '.json' in config_path:
config = json.load(open(config_path, 'r'))
state = ckpt_dict["generator"]
model = HifiGanGenerator(config)
model.load_state_dict(state, strict=True)
model.remove_weight_norm()
model = model.eval().to(device)
print(f"| Loaded model parameters from {checkpoint_path}.")
print(f"| HifiGAN device: {device}.")
return model, config, device
total_time = 0
@register_vocoder
class HifiGAN(PWG):
def __init__(self):
base_dir = hparams['vocoder_ckpt']
config_path = f'{base_dir}/config.yaml'
if os.path.exists(config_path):
ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key=
lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1]
print('| load HifiGAN: ', ckpt)
self.model, self.config, self.device = load_model(config_path=config_path, checkpoint_path=ckpt)
else:
config_path = f'{base_dir}/config.json'
ckpt = f'{base_dir}/generator_v1'
if os.path.exists(config_path):
self.model, self.config, self.device = load_model(config_path=config_path, checkpoint_path=ckpt)
def spec2wav(self, mel, **kwargs):
device = self.device
with torch.no_grad():
c = torch.FloatTensor(mel).unsqueeze(0).transpose(2, 1).to(device)
with utils.Timer('hifigan', print_time=hparams['profile_infer']):
f0 = kwargs.get('f0')
if f0 is not None and hparams.get('use_nsf'):
f0 = torch.FloatTensor(f0[None, :]).to(device)
y = self.model(c, f0).view(-1)
else:
y = self.model(c).view(-1)
wav_out = y.cpu().numpy()
if hparams.get('vocoder_denoise_c', 0.0) > 0:
wav_out = denoise(wav_out, v=hparams['vocoder_denoise_c'])
return wav_out
# @staticmethod
# def wav2spec(wav_fn, **kwargs):
# wav, _ = librosa.core.load(wav_fn, sr=hparams['audio_sample_rate'])
# wav_torch = torch.FloatTensor(wav)[None, :]
# mel = mel_spectrogram(wav_torch, hparams).numpy()[0]
# return wav, mel.T
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/vocoders/hifigan.py |
import importlib
VOCODERS = {}
def register_vocoder(cls):
VOCODERS[cls.__name__.lower()] = cls
VOCODERS[cls.__name__] = cls
return cls
def get_vocoder_cls(hparams):
if hparams['vocoder'] in VOCODERS:
return VOCODERS[hparams['vocoder']]
else:
vocoder_cls = hparams['vocoder']
pkg = ".".join(vocoder_cls.split(".")[:-1])
cls_name = vocoder_cls.split(".")[-1]
vocoder_cls = getattr(importlib.import_module(pkg), cls_name)
return vocoder_cls
class BaseVocoder:
def spec2wav(self, mel):
"""
:param mel: [T, 80]
:return: wav: [T']
"""
raise NotImplementedError
@staticmethod
def wav2spec(wav_fn):
"""
:param wav_fn: str
:return: wav, mel: [T, 80]
"""
raise NotImplementedError
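# Example usage (a sketch, assuming a concrete vocoder such as HifiGAN has been registered by
# importing the `vocoders` package, and that `hparams` provides valid 'vocoder'/'vocoder_ckpt' entries):
#
#     from utils.hparams import hparams
#     vocoder = get_vocoder_cls(hparams)()          # resolve the class from the registry, then instantiate
#     wav, mel = vocoder.wav2spec('example.wav')    # wav: [T_wav], mel: [T, 80]
#     wav_out = vocoder.spec2wav(mel)               # reconstructed waveform [T']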
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/vocoders/base_vocoder.py |
import warnings
warnings.filterwarnings("ignore")
import parselmouth
import os
import torch
from skimage.transform import resize
from utils.text_encoder import TokenTextEncoder
from utils.pitch_utils import f0_to_coarse
import struct
import webrtcvad
from scipy.ndimage.morphology import binary_dilation
import librosa
import numpy as np
from utils import audio
import pyloudnorm as pyln
import re
import json
from collections import OrderedDict
PUNCS = '!,.?;:'
int16_max = (2 ** 15) - 1
def trim_long_silences(path, sr=None, return_raw_wav=False, norm=True, vad_max_silence_length=12):
"""
Ensures that segments without voice in the waveform remain no longer than a
threshold determined by the VAD parameters in params.py.
:param wav: the raw waveform as a numpy array of floats
:param vad_max_silence_length: Maximum number of consecutive silent frames a segment can have.
:return: the same waveform with silences trimmed away (length <= original wav length)
"""
## Voice Activation Detection
# Window size of the VAD. Must be either 10, 20 or 30 milliseconds.
# This sets the granularity of the VAD. Should not need to be changed.
sampling_rate = 16000
wav_raw, sr = librosa.core.load(path, sr=sr)
if norm:
meter = pyln.Meter(sr) # create BS.1770 meter
loudness = meter.integrated_loudness(wav_raw)
wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0)
if np.abs(wav_raw).max() > 1.0:
wav_raw = wav_raw / np.abs(wav_raw).max()
wav = librosa.resample(wav_raw, sr, sampling_rate, res_type='kaiser_best')
vad_window_length = 30 # In milliseconds
# Number of frames to average together when performing the moving average smoothing.
# The larger this value, the larger the VAD variations must be to not get smoothed out.
vad_moving_average_width = 8
# Compute the voice detection window size
samples_per_window = (vad_window_length * sampling_rate) // 1000
# Trim the end of the audio to have a multiple of the window size
wav = wav[:len(wav) - (len(wav) % samples_per_window)]
# Convert the float waveform to 16-bit mono PCM
pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
# Perform voice activation detection
voice_flags = []
vad = webrtcvad.Vad(mode=3)
for window_start in range(0, len(wav), samples_per_window):
window_end = window_start + samples_per_window
voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
sample_rate=sampling_rate))
voice_flags = np.array(voice_flags)
# Smooth the voice detection with a moving average
def moving_average(array, width):
array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
ret = np.cumsum(array_padded, dtype=float)
ret[width:] = ret[width:] - ret[:-width]
return ret[width - 1:] / width
audio_mask = moving_average(voice_flags, vad_moving_average_width)
    audio_mask = np.round(audio_mask).astype(bool)
# Dilate the voiced regions
audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
audio_mask = np.repeat(audio_mask, samples_per_window)
audio_mask = resize(audio_mask, (len(wav_raw),)) > 0
if return_raw_wav:
return wav_raw, audio_mask, sr
return wav_raw[audio_mask], audio_mask, sr
def process_utterance(wav_path,
fft_size=1024,
hop_size=256,
win_length=1024,
window="hann",
num_mels=80,
fmin=80,
fmax=7600,
eps=1e-6,
sample_rate=22050,
loud_norm=False,
min_level_db=-100,
return_linear=False,
trim_long_sil=False, vocoder='pwg'):
if isinstance(wav_path, str):
if trim_long_sil:
wav, _, _ = trim_long_silences(wav_path, sample_rate)
else:
wav, _ = librosa.core.load(wav_path, sr=sample_rate)
else:
wav = wav_path
if loud_norm:
meter = pyln.Meter(sample_rate) # create BS.1770 meter
loudness = meter.integrated_loudness(wav)
wav = pyln.normalize.loudness(wav, loudness, -22.0)
if np.abs(wav).max() > 1:
wav = wav / np.abs(wav).max()
# get amplitude spectrogram
x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size,
win_length=win_length, window=window, pad_mode="constant")
spc = np.abs(x_stft) # (n_bins, T)
# get mel basis
fmin = 0 if fmin == -1 else fmin
fmax = sample_rate / 2 if fmax == -1 else fmax
mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax)
mel = mel_basis @ spc
if vocoder == 'pwg':
mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T)
else:
assert False, f'"{vocoder}" is not in ["pwg"].'
l_pad, r_pad = audio.librosa_pad_lr(wav, fft_size, hop_size, 1)
wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0)
wav = wav[:mel.shape[1] * hop_size]
if not return_linear:
return wav, mel
else:
spc = audio.amp_to_db(spc)
spc = audio.normalize(spc, {'min_level_db': min_level_db})
return wav, mel, spc
def get_pitch(wav_data, mel, hparams):
"""
:param wav_data: [T]
:param mel: [T, 80]
:param hparams:
:return:
"""
time_step = hparams['hop_size'] / hparams['audio_sample_rate'] * 1000
f0_min = 80
f0_max = 750
if hparams['hop_size'] == 128:
pad_size = 4
elif hparams['hop_size'] == 256:
pad_size = 2
else:
assert False
f0 = parselmouth.Sound(wav_data, hparams['audio_sample_rate']).to_pitch_ac(
time_step=time_step / 1000, voicing_threshold=0.6,
pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
lpad = pad_size * 2
rpad = len(mel) - len(f0) - lpad
f0 = np.pad(f0, [[lpad, rpad]], mode='constant')
    # mel and f0 are extracted by two different libraries, so we force them to have the same length.
    # Note: newer versions of some libraries can make ``rpad'' negative.
    # To be safe, set up the same environment as specified in requirements_auto.txt (via Anaconda).
delta_l = len(mel) - len(f0)
assert np.abs(delta_l) <= 8
if delta_l > 0:
f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0)
f0 = f0[:len(mel)]
pitch_coarse = f0_to_coarse(f0)
return f0, pitch_coarse
def remove_empty_lines(text):
"""remove empty lines"""
assert (len(text) > 0)
assert (isinstance(text, list))
text = [t.strip() for t in text]
if "" in text:
text.remove("")
return text
class TextGrid(object):
def __init__(self, text):
text = remove_empty_lines(text)
self.text = text
self.line_count = 0
self._get_type()
self._get_time_intval()
self._get_size()
self.tier_list = []
self._get_item_list()
def _extract_pattern(self, pattern, inc):
"""
Parameters
----------
pattern : regex to extract pattern
inc : increment of line count after extraction
Returns
-------
group : extracted info
"""
try:
group = re.match(pattern, self.text[self.line_count]).group(1)
self.line_count += inc
except AttributeError:
raise ValueError("File format error at line %d:%s" % (self.line_count, self.text[self.line_count]))
return group
def _get_type(self):
self.file_type = self._extract_pattern(r"File type = \"(.*)\"", 2)
def _get_time_intval(self):
self.xmin = self._extract_pattern(r"xmin = (.*)", 1)
self.xmax = self._extract_pattern(r"xmax = (.*)", 2)
def _get_size(self):
self.size = int(self._extract_pattern(r"size = (.*)", 2))
def _get_item_list(self):
"""Only supports IntervalTier currently"""
for itemIdx in range(1, self.size + 1):
tier = OrderedDict()
item_list = []
tier_idx = self._extract_pattern(r"item \[(.*)\]:", 1)
tier_class = self._extract_pattern(r"class = \"(.*)\"", 1)
if tier_class != "IntervalTier":
raise NotImplementedError("Only IntervalTier class is supported currently")
tier_name = self._extract_pattern(r"name = \"(.*)\"", 1)
tier_xmin = self._extract_pattern(r"xmin = (.*)", 1)
tier_xmax = self._extract_pattern(r"xmax = (.*)", 1)
tier_size = self._extract_pattern(r"intervals: size = (.*)", 1)
for i in range(int(tier_size)):
item = OrderedDict()
item["idx"] = self._extract_pattern(r"intervals \[(.*)\]", 1)
item["xmin"] = self._extract_pattern(r"xmin = (.*)", 1)
item["xmax"] = self._extract_pattern(r"xmax = (.*)", 1)
item["text"] = self._extract_pattern(r"text = \"(.*)\"", 1)
item_list.append(item)
tier["idx"] = tier_idx
tier["class"] = tier_class
tier["name"] = tier_name
tier["xmin"] = tier_xmin
tier["xmax"] = tier_xmax
tier["size"] = tier_size
tier["items"] = item_list
self.tier_list.append(tier)
def toJson(self):
_json = OrderedDict()
_json["file_type"] = self.file_type
_json["xmin"] = self.xmin
_json["xmax"] = self.xmax
_json["size"] = self.size
_json["tiers"] = self.tier_list
return json.dumps(_json, ensure_ascii=False, indent=2)
def get_mel2ph(tg_fn, ph, mel, hparams):
ph_list = ph.split(" ")
with open(tg_fn, "r") as f:
tg = f.readlines()
tg = remove_empty_lines(tg)
tg = TextGrid(tg)
tg = json.loads(tg.toJson())
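    # `split[i]` will hold the start time (in seconds) of the i-th phoneme, parsed from the
    # TextGrid intervals; -1 marks boundaries that have not been assigned yet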
    split = np.ones(len(ph_list) + 1, float) * -1
tg_idx = 0
ph_idx = 0
tg_align = [x for x in tg['tiers'][-1]['items']]
tg_align_ = []
for x in tg_align:
x['xmin'] = float(x['xmin'])
x['xmax'] = float(x['xmax'])
if x['text'] in ['sil', 'sp', '', 'SIL', 'PUNC']:
x['text'] = ''
if len(tg_align_) > 0 and tg_align_[-1]['text'] == '':
tg_align_[-1]['xmax'] = x['xmax']
continue
tg_align_.append(x)
tg_align = tg_align_
tg_len = len([x for x in tg_align if x['text'] != ''])
ph_len = len([x for x in ph_list if not is_sil_phoneme(x)])
assert tg_len == ph_len, (tg_len, ph_len, tg_align, ph_list, tg_fn)
while tg_idx < len(tg_align) or ph_idx < len(ph_list):
if tg_idx == len(tg_align) and is_sil_phoneme(ph_list[ph_idx]):
split[ph_idx] = 1e8
ph_idx += 1
continue
x = tg_align[tg_idx]
if x['text'] == '' and ph_idx == len(ph_list):
tg_idx += 1
continue
assert ph_idx < len(ph_list), (tg_len, ph_len, tg_align, ph_list, tg_fn)
ph = ph_list[ph_idx]
if x['text'] == '' and not is_sil_phoneme(ph):
assert False, (ph_list, tg_align)
if x['text'] != '' and is_sil_phoneme(ph):
ph_idx += 1
else:
assert (x['text'] == '' and is_sil_phoneme(ph)) \
or x['text'].lower() == ph.lower() \
or x['text'].lower() == 'sil', (x['text'], ph)
split[ph_idx] = x['xmin']
if ph_idx > 0 and split[ph_idx - 1] == -1 and is_sil_phoneme(ph_list[ph_idx - 1]):
split[ph_idx - 1] = split[ph_idx]
ph_idx += 1
tg_idx += 1
assert tg_idx == len(tg_align), (tg_idx, [x['text'] for x in tg_align])
assert ph_idx >= len(ph_list) - 1, (ph_idx, ph_list, len(ph_list), [x['text'] for x in tg_align], tg_fn)
    mel2ph = np.zeros([mel.shape[0]], int)
split[0] = 0
split[-1] = 1e8
for i in range(len(split) - 1):
assert split[i] != -1 and split[i] <= split[i + 1], (split[:-1],)
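    # convert phoneme boundaries from seconds to mel-frame indices, then fill mel2ph with 1-based phoneme ids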
split = [int(s * hparams['audio_sample_rate'] / hparams['hop_size'] + 0.5) for s in split]
for ph_idx in range(len(ph_list)):
mel2ph[split[ph_idx]:split[ph_idx + 1]] = ph_idx + 1
mel2ph_torch = torch.from_numpy(mel2ph)
T_t = len(ph_list)
dur = mel2ph_torch.new_zeros([T_t + 1]).scatter_add(0, mel2ph_torch, torch.ones_like(mel2ph_torch))
dur = dur[1:].numpy()
return mel2ph, dur
def build_phone_encoder(data_dir):
phone_list_file = os.path.join(data_dir, 'phone_set.json')
phone_list = json.load(open(phone_list_file))
return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
def build_word_encoder(data_dir):
word_list_file = os.path.join(data_dir, 'word_set.json')
word_list = json.load(open(word_list_file))
return TokenTextEncoder(None, vocab_list=word_list, replace_oov=',')
def is_sil_phoneme(p):
return not p[0].isalpha()
def build_token_encoder(token_list_file):
token_list = json.load(open(token_list_file))
return TokenTextEncoder(None, vocab_list=token_list, replace_oov='<UNK>')
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/data_gen_utils.py |
import os
os.environ["OMP_NUM_THREADS"] = "1"
from utils.multiprocess_utils import chunked_multiprocess_run
import random
import traceback
import json
from resemblyzer import VoiceEncoder
from tqdm import tqdm
from data_gen.tts.data_gen_utils import get_mel2ph, get_pitch, build_phone_encoder
from utils.hparams import set_hparams, hparams
import numpy as np
from utils.indexed_datasets import IndexedDatasetBuilder
from vocoders.base_vocoder import VOCODERS
import pandas as pd
class BinarizationError(Exception):
pass
class BaseBinarizer:
def __init__(self, processed_data_dir=None):
if processed_data_dir is None:
processed_data_dir = hparams['processed_data_dir']
self.processed_data_dirs = processed_data_dir.split(",")
self.binarization_args = hparams['binarization_args']
self.pre_align_args = hparams['pre_align_args']
self.forced_align = self.pre_align_args['forced_align']
tg_dir = None
if self.forced_align == 'mfa':
tg_dir = 'mfa_outputs'
if self.forced_align == 'kaldi':
tg_dir = 'kaldi_outputs'
self.item2txt = {}
self.item2ph = {}
self.item2wavfn = {}
self.item2tgfn = {}
self.item2spk = {}
for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
self.meta_df = pd.read_csv(f"{processed_data_dir}/metadata_phone.csv", dtype=str)
for r_idx, r in self.meta_df.iterrows():
item_name = raw_item_name = r['item_name']
if len(self.processed_data_dirs) > 1:
item_name = f'ds{ds_id}_{item_name}'
self.item2txt[item_name] = r['txt']
self.item2ph[item_name] = r['ph']
self.item2wavfn[item_name] = os.path.join(hparams['raw_data_dir'], 'wavs', os.path.basename(r['wav_fn']).split('_')[1])
self.item2spk[item_name] = r.get('spk', 'SPK1')
if len(self.processed_data_dirs) > 1:
self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
if tg_dir is not None:
self.item2tgfn[item_name] = f"{processed_data_dir}/{tg_dir}/{raw_item_name}.TextGrid"
self.item_names = sorted(list(self.item2txt.keys()))
if self.binarization_args['shuffle']:
random.seed(1234)
random.shuffle(self.item_names)
@property
def train_item_names(self):
return self.item_names[hparams['test_num']+hparams['valid_num']:]
@property
def valid_item_names(self):
return self.item_names[0: hparams['test_num']+hparams['valid_num']] #
@property
def test_item_names(self):
return self.item_names[0: hparams['test_num']] # Audios for MOS testing are in 'test_ids'
def build_spk_map(self):
spk_map = set()
for item_name in self.item_names:
spk_name = self.item2spk[item_name]
spk_map.add(spk_name)
spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))}
assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map)
return spk_map
def item_name2spk_id(self, item_name):
return self.spk_map[self.item2spk[item_name]]
def _phone_encoder(self):
ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
ph_set = []
if hparams['reset_phone_dict'] or not os.path.exists(ph_set_fn):
for processed_data_dir in self.processed_data_dirs:
ph_set += [x.split(' ')[0] for x in open(f'{processed_data_dir}/dict.txt').readlines()]
ph_set = sorted(set(ph_set))
json.dump(ph_set, open(ph_set_fn, 'w'))
else:
ph_set = json.load(open(ph_set_fn, 'r'))
print("| phone set: ", ph_set)
return build_phone_encoder(hparams['binary_data_dir'])
def meta_data(self, prefix):
if prefix == 'valid':
item_names = self.valid_item_names
elif prefix == 'test':
item_names = self.test_item_names
else:
item_names = self.train_item_names
for item_name in item_names:
ph = self.item2ph[item_name]
txt = self.item2txt[item_name]
tg_fn = self.item2tgfn.get(item_name)
wav_fn = self.item2wavfn[item_name]
spk_id = self.item_name2spk_id(item_name)
yield item_name, ph, txt, tg_fn, wav_fn, spk_id
def process(self):
os.makedirs(hparams['binary_data_dir'], exist_ok=True)
self.spk_map = self.build_spk_map()
print("| spk_map: ", self.spk_map)
spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json"
json.dump(self.spk_map, open(spk_map_fn, 'w'))
self.phone_encoder = self._phone_encoder()
self.process_data('valid')
self.process_data('test')
self.process_data('train')
def process_data(self, prefix):
data_dir = hparams['binary_data_dir']
args = []
builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}')
lengths = []
f0s = []
total_sec = 0
if self.binarization_args['with_spk_embed']:
voice_encoder = VoiceEncoder().cuda()
meta_data = list(self.meta_data(prefix))
for m in meta_data:
args.append(list(m) + [self.phone_encoder, self.binarization_args])
num_workers = int(os.getenv('N_PROC', os.cpu_count() // 3))
for f_id, (_, item) in enumerate(
zip(tqdm(meta_data), chunked_multiprocess_run(self.process_item, args, num_workers=num_workers))):
if item is None:
continue
item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \
if self.binarization_args['with_spk_embed'] else None
if not self.binarization_args['with_wav'] and 'wav' in item:
print("del wav")
del item['wav']
builder.add_item(item)
lengths.append(item['len'])
total_sec += item['sec']
if item.get('f0') is not None:
f0s.append(item['f0'])
builder.finalize()
np.save(f'{data_dir}/{prefix}_lengths.npy', lengths)
if len(f0s) > 0:
f0s = np.concatenate(f0s, 0)
f0s = f0s[f0s != 0]
np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()])
print(f"| {prefix} total duration: {total_sec:.3f}s")
@classmethod
def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args):
if hparams['vocoder'] in VOCODERS:
wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn)
else:
wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn)
res = {
'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn,
'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id
}
try:
if binarization_args['with_f0']:
cls.get_pitch(wav, mel, res)
if binarization_args['with_f0cwt']:
cls.get_f0cwt(res['f0'], res)
if binarization_args['with_txt']:
try:
phone_encoded = res['phone'] = encoder.encode(ph)
                except Exception:
traceback.print_exc()
raise BinarizationError(f"Empty phoneme")
if binarization_args['with_align']:
cls.get_align(tg_fn, ph, mel, phone_encoded, res)
except BinarizationError as e:
print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
return None
return res
@staticmethod
def get_align(tg_fn, ph, mel, phone_encoded, res):
if tg_fn is not None and os.path.exists(tg_fn):
mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams)
else:
raise BinarizationError(f"Align not found")
if mel2ph.max() - 1 >= len(phone_encoded):
raise BinarizationError(
f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}")
res['mel2ph'] = mel2ph
res['dur'] = dur
@staticmethod
def get_pitch(wav, mel, res):
f0, pitch_coarse = get_pitch(wav, mel, hparams)
if sum(f0) == 0:
raise BinarizationError("Empty f0")
res['f0'] = f0
res['pitch'] = pitch_coarse
@staticmethod
def get_f0cwt(f0, res):
from utils.cwt import get_cont_lf0, get_lf0_cwt
uv, cont_lf0_lpf = get_cont_lf0(f0)
logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf)
cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org
Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm)
if np.any(np.isnan(Wavelet_lf0)):
raise BinarizationError("NaN CWT")
res['cwt_spec'] = Wavelet_lf0
res['cwt_scales'] = scales
res['f0_mean'] = logf0s_mean_org
res['f0_std'] = logf0s_std_org
if __name__ == "__main__":
set_hparams()
BaseBinarizer().process()
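# Typical invocation (the config path is a placeholder, not something shipped with this file):
#   python data_gen/tts/base_binarizer.py --config <path/to/your/config.yaml>
# set_hparams() parses --config / --exp_name / -hp from the command line (see utils/hparams.py).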
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/base_binarizer.py |
import os
os.environ["OMP_NUM_THREADS"] = "1"
import torch
from collections import Counter
from utils.text_encoder import TokenTextEncoder
from data_gen.tts.emotion import inference as EmotionEncoder
from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance
from data_gen.tts.emotion.inference import preprocess_wav
from utils.multiprocess_utils import chunked_multiprocess_run
import random
import traceback
import json
from resemblyzer import VoiceEncoder
from tqdm import tqdm
from data_gen.tts.data_gen_utils import get_mel2ph, get_pitch, build_phone_encoder, is_sil_phoneme
from utils.hparams import hparams, set_hparams
import numpy as np
from utils.indexed_datasets import IndexedDatasetBuilder
from vocoders.base_vocoder import get_vocoder_cls
import pandas as pd
class BinarizationError(Exception):
pass
class EmotionBinarizer:
def __init__(self, processed_data_dir=None):
if processed_data_dir is None:
processed_data_dir = hparams['processed_data_dir']
self.processed_data_dirs = processed_data_dir.split(",")
self.binarization_args = hparams['binarization_args']
self.pre_align_args = hparams['pre_align_args']
self.item2txt = {}
self.item2ph = {}
self.item2wavfn = {}
self.item2tgfn = {}
self.item2spk = {}
self.item2emo = {}
def load_meta_data(self):
for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
self.meta_df = pd.read_csv(f"{processed_data_dir}/metadata_phone.csv", dtype=str)
for r_idx, r in tqdm(self.meta_df.iterrows(), desc='Loading meta data.'):
item_name = raw_item_name = r['item_name']
if len(self.processed_data_dirs) > 1:
item_name = f'ds{ds_id}_{item_name}'
self.item2txt[item_name] = r['txt']
self.item2ph[item_name] = r['ph']
self.item2wavfn[item_name] = r['wav_fn']
self.item2spk[item_name] = r.get('spk_name', 'SPK1') \
if self.binarization_args['with_spk_id'] else 'SPK1'
if len(self.processed_data_dirs) > 1:
self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
self.item2tgfn[item_name] = f"{processed_data_dir}/mfa_outputs/{raw_item_name}.TextGrid"
self.item2emo[item_name] = r.get('others', '"Neutral"')
self.item_names = sorted(list(self.item2txt.keys()))
if self.binarization_args['shuffle']:
random.seed(1234)
random.shuffle(self.item_names)
@property
def train_item_names(self):
return self.item_names[hparams['test_num']:]
@property
def valid_item_names(self):
return self.item_names[:hparams['test_num']]
@property
def test_item_names(self):
return self.valid_item_names
def build_spk_map(self):
spk_map = set()
for item_name in self.item_names:
spk_name = self.item2spk[item_name]
spk_map.add(spk_name)
spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))}
print("| #Spk: ", len(spk_map))
assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map)
return spk_map
def build_emo_map(self):
emo_map = set()
for item_name in self.item_names:
emo_name = self.item2emo[item_name]
emo_map.add(emo_name)
emo_map = {x: i for i, x in enumerate(sorted(list(emo_map)))}
print("| #Emo: ", len(emo_map))
return emo_map
def item_name2spk_id(self, item_name):
return self.spk_map[self.item2spk[item_name]]
def item_name2emo_id(self, item_name):
return self.emo_map[self.item2emo[item_name]]
def _phone_encoder(self):
ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
ph_set = []
if self.binarization_args['reset_phone_dict'] or not os.path.exists(ph_set_fn):
for ph_sent in self.item2ph.values():
ph_set += ph_sent.split(' ')
ph_set = sorted(set(ph_set))
json.dump(ph_set, open(ph_set_fn, 'w'))
print("| Build phone set: ", ph_set)
else:
ph_set = json.load(open(ph_set_fn, 'r'))
print("| Load phone set: ", ph_set)
return build_phone_encoder(hparams['binary_data_dir'])
def _word_encoder(self):
fn = f"{hparams['binary_data_dir']}/word_set.json"
word_set = []
if self.binarization_args['reset_word_dict']:
for word_sent in self.item2txt.values():
word_set += [x for x in word_sent.split(' ') if x != '']
word_set = Counter(word_set)
total_words = sum(word_set.values())
word_set = word_set.most_common(hparams['word_size'])
num_unk_words = total_words - sum([x[1] for x in word_set])
word_set = [x[0] for x in word_set]
json.dump(word_set, open(fn, 'w'))
print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words},"
f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.")
else:
word_set = json.load(open(fn, 'r'))
print("| Load word set. Size: ", len(word_set), word_set[:10])
return TokenTextEncoder(None, vocab_list=word_set, replace_oov='<UNK>')
def meta_data(self, prefix):
if prefix == 'valid':
item_names = self.valid_item_names
elif prefix == 'test':
item_names = self.test_item_names
else:
item_names = self.train_item_names
for item_name in item_names:
ph = self.item2ph[item_name]
txt = self.item2txt[item_name]
tg_fn = self.item2tgfn.get(item_name)
wav_fn = self.item2wavfn[item_name]
spk_id = self.item_name2spk_id(item_name)
emotion = self.item_name2emo_id(item_name)
yield item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion
def process(self):
self.load_meta_data()
os.makedirs(hparams['binary_data_dir'], exist_ok=True)
self.spk_map = self.build_spk_map()
print("| spk_map: ", self.spk_map)
spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json"
json.dump(self.spk_map, open(spk_map_fn, 'w'))
self.emo_map = self.build_emo_map()
print("| emo_map: ", self.emo_map)
emo_map_fn = f"{hparams['binary_data_dir']}/emo_map.json"
json.dump(self.emo_map, open(emo_map_fn, 'w'))
self.phone_encoder = self._phone_encoder()
self.word_encoder = None
EmotionEncoder.load_model(hparams['emotion_encoder_path'])
if self.binarization_args['with_word']:
self.word_encoder = self._word_encoder()
self.process_data('valid')
self.process_data('test')
self.process_data('train')
def process_data(self, prefix):
data_dir = hparams['binary_data_dir']
args = []
builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}')
ph_lengths = []
mel_lengths = []
f0s = []
total_sec = 0
if self.binarization_args['with_spk_embed']:
voice_encoder = VoiceEncoder().cuda()
meta_data = list(self.meta_data(prefix))
for m in meta_data:
args.append(list(m) + [(self.phone_encoder, self.word_encoder), self.binarization_args])
num_workers = self.num_workers
for f_id, (_, item) in enumerate(
zip(tqdm(meta_data), chunked_multiprocess_run(self.process_item, args, num_workers=num_workers))):
if item is None:
continue
item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \
if self.binarization_args['with_spk_embed'] else None
processed_wav = preprocess_wav(item['wav_fn'])
item['emo_embed'] = Embed_utterance(processed_wav)
if not self.binarization_args['with_wav'] and 'wav' in item:
del item['wav']
builder.add_item(item)
mel_lengths.append(item['len'])
if 'ph_len' in item:
ph_lengths.append(item['ph_len'])
total_sec += item['sec']
if item.get('f0') is not None:
f0s.append(item['f0'])
builder.finalize()
np.save(f'{data_dir}/{prefix}_lengths.npy', mel_lengths)
if len(ph_lengths) > 0:
np.save(f'{data_dir}/{prefix}_ph_lengths.npy', ph_lengths)
if len(f0s) > 0:
f0s = np.concatenate(f0s, 0)
f0s = f0s[f0s != 0]
np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()])
print(f"| {prefix} total duration: {total_sec:.3f}s")
@classmethod
def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion, encoder, binarization_args):
res = {'item_name': item_name, 'txt': txt, 'ph': ph, 'wav_fn': wav_fn, 'spk_id': spk_id, 'emotion': emotion}
if binarization_args['with_linear']:
wav, mel, linear_stft = get_vocoder_cls(hparams).wav2spec(wav_fn) # , return_linear=True
res['linear'] = linear_stft
else:
wav, mel = get_vocoder_cls(hparams).wav2spec(wav_fn)
wav = wav.astype(np.float16)
res.update({'mel': mel, 'wav': wav,
'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0]})
try:
if binarization_args['with_f0']:
cls.get_pitch(res)
if binarization_args['with_f0cwt']:
cls.get_f0cwt(res)
if binarization_args['with_txt']:
ph_encoder, word_encoder = encoder
try:
res['phone'] = ph_encoder.encode(ph)
res['ph_len'] = len(res['phone'])
                except Exception:
traceback.print_exc()
raise BinarizationError(f"Empty phoneme")
if binarization_args['with_align']:
cls.get_align(tg_fn, res)
if binarization_args['trim_eos_bos']:
bos_dur = res['dur'][0]
eos_dur = res['dur'][-1]
res['mel'] = mel[bos_dur:-eos_dur]
res['f0'] = res['f0'][bos_dur:-eos_dur]
res['pitch'] = res['pitch'][bos_dur:-eos_dur]
res['mel2ph'] = res['mel2ph'][bos_dur:-eos_dur]
res['wav'] = wav[bos_dur * hparams['hop_size']:-eos_dur * hparams['hop_size']]
res['dur'] = res['dur'][1:-1]
res['len'] = res['mel'].shape[0]
if binarization_args['with_word']:
cls.get_word(res, word_encoder)
except BinarizationError as e:
print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
return None
except Exception as e:
traceback.print_exc()
print(f"| Skip item. item_name: {item_name}, wav_fn: {wav_fn}")
return None
return res
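    # Note on trim_eos_bos above (sketch with hypothetical numbers): frame-level features are cut
    # by phoneme durations while the waveform is cut by duration * hop_size samples. For example,
    # with hparams['hop_size'] = 256 and dur = [12, ..., 9], mel/f0/pitch/mel2ph lose 12 leading and
    # 9 trailing frames, wav loses 12 * 256 = 3072 leading and 9 * 256 = 2304 trailing samples, and
    # dur drops its first and last entries.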
@staticmethod
def get_align(tg_fn, res):
ph = res['ph']
mel = res['mel']
phone_encoded = res['phone']
if tg_fn is not None and os.path.exists(tg_fn):
mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams)
else:
raise BinarizationError(f"Align not found")
if mel2ph.max() - 1 >= len(phone_encoded):
raise BinarizationError(
f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}")
res['mel2ph'] = mel2ph
res['dur'] = dur
@staticmethod
def get_pitch(res):
wav, mel = res['wav'], res['mel']
f0, pitch_coarse = get_pitch(wav, mel, hparams)
if sum(f0) == 0:
raise BinarizationError("Empty f0")
res['f0'] = f0
res['pitch'] = pitch_coarse
@staticmethod
def get_f0cwt(res):
from utils.cwt import get_cont_lf0, get_lf0_cwt
f0 = res['f0']
uv, cont_lf0_lpf = get_cont_lf0(f0)
logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf)
cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org
Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm)
if np.any(np.isnan(Wavelet_lf0)):
raise BinarizationError("NaN CWT")
res['cwt_spec'] = Wavelet_lf0
res['cwt_scales'] = scales
res['f0_mean'] = logf0s_mean_org
res['f0_std'] = logf0s_std_org
@staticmethod
def get_word(res, word_encoder):
ph_split = res['ph'].split(" ")
# ph side mapping to word
ph_words = [] # ['<BOS>', 'N_AW1_', ',', 'AE1_Z_|', 'AO1_L_|', 'B_UH1_K_S_|', 'N_AA1_T_|', ....]
ph2word = np.zeros([len(ph_split)], dtype=int)
last_ph_idx_for_word = [] # [2, 11, ...]
for i, ph in enumerate(ph_split):
if ph == '|':
last_ph_idx_for_word.append(i)
elif not ph[0].isalnum():
if ph not in ['<BOS>']:
last_ph_idx_for_word.append(i - 1)
last_ph_idx_for_word.append(i)
start_ph_idx_for_word = [0] + [i + 1 for i in last_ph_idx_for_word[:-1]]
for i, (s_w, e_w) in enumerate(zip(start_ph_idx_for_word, last_ph_idx_for_word)):
ph_words.append(ph_split[s_w:e_w + 1])
ph2word[s_w:e_w + 1] = i
ph2word = ph2word.tolist()
ph_words = ["_".join(w) for w in ph_words]
# mel side mapping to word
mel2word = []
dur_word = [0 for _ in range(len(ph_words))]
for i, m2p in enumerate(res['mel2ph']):
word_idx = ph2word[m2p - 1]
mel2word.append(ph2word[m2p - 1])
dur_word[word_idx] += 1
        ph2word = [x + 1 for x in ph2word]  # 0 is reserved for padding
        mel2word = [x + 1 for x in mel2word]  # 0 is reserved for padding
res['ph_words'] = ph_words # [T_word]
res['ph2word'] = ph2word # [T_ph]
res['mel2word'] = mel2word # [T_mel]
res['dur_word'] = dur_word # [T_word]
words = [x for x in res['txt'].split(" ") if x != '']
while len(words) > 0 and is_sil_phoneme(words[0]):
words = words[1:]
while len(words) > 0 and is_sil_phoneme(words[-1]):
words = words[:-1]
words = ['<BOS>'] + words + ['<EOS>']
word_tokens = word_encoder.encode(" ".join(words))
res['words'] = words
res['word_tokens'] = word_tokens
assert len(words) == len(ph_words), [words, ph_words]
@property
def num_workers(self):
return int(os.getenv('N_PROC', hparams.get('N_PROC', os.cpu_count())))
if __name__ == "__main__":
set_hparams()
EmotionBinarizer().process()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/base_binarizer_emotion.py |
import os
os.environ["OMP_NUM_THREADS"] = "1"
from data_gen.tts.txt_processors.zh_g2pM import ALL_SHENMU
from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError
from data_gen.tts.data_gen_utils import get_mel2ph
from utils.hparams import set_hparams, hparams
import numpy as np
class ZhBinarizer(BaseBinarizer):
@staticmethod
def get_align(tg_fn, ph, mel, phone_encoded, res):
if tg_fn is not None and os.path.exists(tg_fn):
_, dur = get_mel2ph(tg_fn, ph, mel, hparams)
else:
raise BinarizationError(f"Align not found")
ph_list = ph.split(" ")
assert len(dur) == len(ph_list)
mel2ph = []
        # assign the duration of separator symbols to the preceding final (yunmu)
dur_cumsum = np.pad(np.cumsum(dur), [1, 0], mode='constant', constant_values=0)
for i in range(len(dur)):
p = ph_list[i]
if p[0] != '<' and not p[0].isalpha():
uv_ = res['f0'][dur_cumsum[i]:dur_cumsum[i + 1]] == 0
j = 0
while j < len(uv_) and not uv_[j]:
j += 1
dur[i - 1] += j
dur[i] -= j
if dur[i] < 100:
dur[i - 1] += dur[i]
dur[i] = 0
        # make the initial (shengmu) and the final (yunmu) equally long
for i in range(len(dur)):
p = ph_list[i]
if p in ALL_SHENMU:
p_next = ph_list[i + 1]
if not (dur[i] > 0 and p_next[0].isalpha() and p_next not in ALL_SHENMU):
print(f"assert dur[i] > 0 and p_next[0].isalpha() and p_next not in ALL_SHENMU, "
f"dur[i]: {dur[i]}, p: {p}, p_next: {p_next}.")
continue
total = dur[i + 1] + dur[i]
dur[i] = total // 2
dur[i + 1] = total - dur[i]
for i in range(len(dur)):
mel2ph += [i + 1] * dur[i]
mel2ph = np.array(mel2ph)
if mel2ph.max() - 1 >= len(phone_encoded):
raise BinarizationError(f"| Align does not match: {(mel2ph.max() - 1, len(phone_encoded))}")
res['mel2ph'] = mel2ph
res['dur'] = dur
if __name__ == "__main__":
set_hparams()
ZhBinarizer().process()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/binarizer_zh.py |
import json
import os
import random
import re
import traceback
from collections import Counter
from functools import partial
import pandas as pd
import librosa
from tqdm import tqdm
from data_gen.tts.txt_processors.base_text_processor import get_txt_processor_cls
from data_gen.tts.wav_processors.base_processor import get_wav_processor_cls
from utils.hparams import hparams
from utils.multiprocess_utils import multiprocess_run_tqdm
from utils.os_utils import link_file, move_file, remove_file
from data_gen.tts.data_gen_utils import is_sil_phoneme, build_token_encoder
class BasePreprocessor:
def __init__(self):
self.preprocess_args = hparams['preprocess_args']
txt_processor = self.preprocess_args['txt_processor']
self.txt_processor = get_txt_processor_cls(txt_processor)
self.raw_data_dir = hparams['raw_data_dir']
self.processed_dir = hparams['processed_data_dir']
self.spk_map_fn = f"{self.processed_dir}/spk_map.json"
def meta_data(self):
"""
:return: {'item_name': Str, 'wav_fn': Str, 'txt': Str, 'spk_name': Str, 'txt_loader': None or Func}
"""
raise NotImplementedError
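    # A concrete subclass implements meta_data() as a generator. Minimal sketch -- the file layout,
    # the transcript lookup and the extra `import glob` are hypothetical, not part of this class:
    #   def meta_data(self):
    #       for wav_fn in sorted(glob.glob(f'{self.raw_data_dir}/wavs/*.wav')):
    #           item_name = os.path.splitext(os.path.basename(wav_fn))[0]
    #           yield {'item_name': item_name, 'wav_fn': wav_fn,
    #                  'txt': self.read_transcript(item_name), 'spk_name': '<SINGLE_SPK>'}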
def process(self):
processed_dir = self.processed_dir
wav_processed_tmp_dir = f'{processed_dir}/processed_tmp'
remove_file(wav_processed_tmp_dir)
os.makedirs(wav_processed_tmp_dir, exist_ok=True)
wav_processed_dir = f'{processed_dir}/{self.wav_processed_dirname}'
remove_file(wav_processed_dir)
os.makedirs(wav_processed_dir, exist_ok=True)
meta_data = list(tqdm(self.meta_data(), desc='Load meta data'))
item_names = [d['item_name'] for d in meta_data]
assert len(item_names) == len(set(item_names)), 'Key `item_name` should be Unique.'
# preprocess data
phone_list = []
word_list = []
spk_names = set()
process_item = partial(self.preprocess_first_pass,
txt_processor=self.txt_processor,
wav_processed_dir=wav_processed_dir,
wav_processed_tmp=wav_processed_tmp_dir,
preprocess_args=self.preprocess_args)
items = []
args = [{
'item_name': item_raw['item_name'],
'txt_raw': item_raw['txt'],
'wav_fn': item_raw['wav_fn'],
'txt_loader': item_raw.get('txt_loader'),
'others': item_raw.get('others', None)
} for item_raw in meta_data]
for item_, (item_id, item) in zip(meta_data, multiprocess_run_tqdm(process_item, args, desc='Preprocess')):
if item is not None:
item_.update(item)
item = item_
if 'txt_loader' in item:
del item['txt_loader']
item['id'] = item_id
item['spk_name'] = item.get('spk_name', '<SINGLE_SPK>')
item['others'] = item.get('others', None)
phone_list += item['ph'].split(" ")
word_list += item['word'].split(" ")
spk_names.add(item['spk_name'])
items.append(item)
# add encoded tokens
ph_encoder, word_encoder = self._phone_encoder(phone_list), self._word_encoder(word_list)
spk_map = self.build_spk_map(spk_names)
args = [{
'ph': item['ph'], 'word': item['word'], 'spk_name': item['spk_name'],
'word_encoder': word_encoder, 'ph_encoder': ph_encoder, 'spk_map': spk_map
} for item in items]
for idx, item_new_kv in multiprocess_run_tqdm(self.preprocess_second_pass, args, desc='Add encoded tokens'):
items[idx].update(item_new_kv)
# build mfa data
if self.preprocess_args['use_mfa']:
mfa_dict = set()
mfa_input_dir = f'{processed_dir}/mfa_inputs'
remove_file(mfa_input_dir)
# group MFA inputs for better parallelism
mfa_groups = [i // self.preprocess_args['nsample_per_mfa_group'] for i in range(len(items))]
if self.preprocess_args['mfa_group_shuffle']:
random.seed(hparams['seed'])
random.shuffle(mfa_groups)
args = [{
'item': item, 'mfa_input_dir': mfa_input_dir,
'mfa_group': mfa_group, 'wav_processed_tmp': wav_processed_tmp_dir,
'preprocess_args': self.preprocess_args
} for item, mfa_group in zip(items, mfa_groups)]
for i, (ph_gb_word_nosil, new_wav_align_fn) in multiprocess_run_tqdm(
self.build_mfa_inputs, args, desc='Build MFA data'):
items[i]['wav_align_fn'] = new_wav_align_fn
for w in ph_gb_word_nosil.split(" "):
mfa_dict.add(f"{w} {w.replace('_', ' ')}")
mfa_dict = sorted(mfa_dict)
with open(f'{processed_dir}/mfa_dict.txt', 'w') as f:
f.writelines([f'{l}\n' for l in mfa_dict])
with open(f"{processed_dir}/{self.meta_csv_filename}.json", 'w') as f:
f.write(re.sub(r'\n\s+([\d+\]])', r'\1', json.dumps(items, ensure_ascii=False, sort_keys=False, indent=1)))
remove_file(wav_processed_tmp_dir)
@classmethod
def preprocess_first_pass(cls, item_name, txt_raw, txt_processor,
wav_fn, wav_processed_dir, wav_processed_tmp,
preprocess_args, txt_loader=None, others=None):
try:
if txt_loader is not None:
txt_raw = txt_loader(txt_raw)
ph, txt, word, ph2word, ph_gb_word = cls.txt_to_ph(txt_processor, txt_raw, preprocess_args)
wav_fn, wav_align_fn = cls.process_wav(
item_name, wav_fn,
hparams['processed_data_dir'],
wav_processed_tmp, preprocess_args)
# wav for binarization
ext = os.path.splitext(wav_fn)[1]
os.makedirs(wav_processed_dir, exist_ok=True)
new_wav_fn = f"{wav_processed_dir}/{item_name}{ext}"
move_link_func = move_file if os.path.dirname(wav_fn) == wav_processed_tmp else link_file
move_link_func(wav_fn, new_wav_fn)
return {
'txt': txt, 'txt_raw': txt_raw, 'ph': ph,
'word': word, 'ph2word': ph2word, 'ph_gb_word': ph_gb_word,
'wav_fn': new_wav_fn, 'wav_align_fn': wav_align_fn,
'others': others
}
        except Exception:
traceback.print_exc()
print(f"| Error is caught. item_name: {item_name}.")
return None
@staticmethod
def txt_to_ph(txt_processor, txt_raw, preprocess_args):
txt_struct, txt = txt_processor.process(txt_raw, preprocess_args)
ph = [p for w in txt_struct for p in w[1]]
ph_gb_word = ["_".join(w[1]) for w in txt_struct]
words = [w[0] for w in txt_struct]
# word_id=0 is reserved for padding
ph2word = [w_id + 1 for w_id, w in enumerate(txt_struct) for _ in range(len(w[1]))]
return " ".join(ph), txt, " ".join(words), ph2word, " ".join(ph_gb_word)
@staticmethod
def process_wav(item_name, wav_fn, processed_dir, wav_processed_tmp, preprocess_args):
processors = [get_wav_processor_cls(v) for v in preprocess_args['wav_processors']]
processors = [k() for k in processors if k is not None]
if len(processors) >= 1:
sr_file = librosa.core.get_samplerate(wav_fn)
output_fn_for_align = None
ext = os.path.splitext(wav_fn)[1]
input_fn = f"{wav_processed_tmp}/{item_name}{ext}"
link_file(wav_fn, input_fn)
for p in processors:
outputs = p.process(input_fn, sr_file, wav_processed_tmp, processed_dir, item_name, preprocess_args)
if len(outputs) == 3:
input_fn, sr, output_fn_for_align = outputs
else:
input_fn, sr = outputs
if output_fn_for_align is None:
return input_fn, input_fn
else:
return input_fn, output_fn_for_align
else:
return wav_fn, wav_fn
def _phone_encoder(self, ph_set):
ph_set_fn = f"{self.processed_dir}/phone_set.json"
if self.preprocess_args['reset_phone_dict'] or not os.path.exists(ph_set_fn):
ph_set = sorted(set(ph_set))
json.dump(ph_set, open(ph_set_fn, 'w'), ensure_ascii=False)
print("| Build phone set: ", ph_set)
else:
ph_set = json.load(open(ph_set_fn, 'r'))
print("| Load phone set: ", ph_set)
return build_token_encoder(ph_set_fn)
def _word_encoder(self, word_set):
word_set_fn = f"{self.processed_dir}/word_set.json"
if self.preprocess_args['reset_word_dict']:
word_set = Counter(word_set)
total_words = sum(word_set.values())
word_set = word_set.most_common(hparams['word_dict_size'])
num_unk_words = total_words - sum([x[1] for x in word_set])
word_set = ['<BOS>', '<EOS>'] + [x[0] for x in word_set]
word_set = sorted(set(word_set))
json.dump(word_set, open(word_set_fn, 'w'), ensure_ascii=False)
print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words},"
f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.")
else:
word_set = json.load(open(word_set_fn, 'r'))
print("| Load word set. Size: ", len(word_set), word_set[:10])
return build_token_encoder(word_set_fn)
@classmethod
def preprocess_second_pass(cls, word, ph, spk_name, word_encoder, ph_encoder, spk_map):
word_token = word_encoder.encode(word)
ph_token = ph_encoder.encode(ph)
spk_id = spk_map[spk_name]
return {'word_token': word_token, 'ph_token': ph_token, 'spk_id': spk_id}
def build_spk_map(self, spk_names):
spk_map = {x: i for i, x in enumerate(sorted(list(spk_names)))}
assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map)
print(f"| Number of spks: {len(spk_map)}, spk_map: {spk_map}")
json.dump(spk_map, open(self.spk_map_fn, 'w'), ensure_ascii=False)
return spk_map
@classmethod
def build_mfa_inputs(cls, item, mfa_input_dir, mfa_group, wav_processed_tmp, preprocess_args):
item_name = item['item_name']
wav_align_fn = item['wav_align_fn']
ph_gb_word = item['ph_gb_word']
ext = os.path.splitext(wav_align_fn)[1]
mfa_input_group_dir = f'{mfa_input_dir}/{mfa_group}'
os.makedirs(mfa_input_group_dir, exist_ok=True)
new_wav_align_fn = f"{mfa_input_group_dir}/{item_name}{ext}"
move_link_func = move_file if os.path.dirname(wav_align_fn) == wav_processed_tmp else link_file
move_link_func(wav_align_fn, new_wav_align_fn)
ph_gb_word_nosil = " ".join(["_".join([p for p in w.split("_") if not is_sil_phoneme(p)])
for w in ph_gb_word.split(" ") if not is_sil_phoneme(w)])
with open(f'{mfa_input_group_dir}/{item_name}.lab', 'w') as f_txt:
f_txt.write(ph_gb_word_nosil)
return ph_gb_word_nosil, new_wav_align_fn
def load_spk_map(self, base_dir):
spk_map_fn = f"{base_dir}/spk_map.json"
spk_map = json.load(open(spk_map_fn, 'r'))
return spk_map
def load_dict(self, base_dir):
ph_encoder = build_token_encoder(f'{base_dir}/phone_set.json')
word_encoder = build_token_encoder(f'{base_dir}/word_set.json')
return ph_encoder, word_encoder
@property
def meta_csv_filename(self):
return 'metadata'
@property
def wav_processed_dirname(self):
        return 'wav_processed'
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/base_preprocess.py |
import os
import subprocess
import librosa
import numpy as np
from data_gen.tts.wav_processors.base_processor import BaseWavProcessor, register_wav_processors
from data_gen.tts.data_gen_utils import trim_long_silences
from utils.audio import save_wav, rnnoise
from utils.hparams import hparams
@register_wav_processors(name='sox_to_wav')
class ConvertToWavProcessor(BaseWavProcessor):
@property
def name(self):
return 'ToWav'
def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
if input_fn[-4:] == '.wav':
return input_fn, sr
else:
output_fn = self.output_fn(input_fn)
subprocess.check_call(f'sox -v 0.95 "{input_fn}" -t wav "{output_fn}"', shell=True)
return output_fn, sr
@register_wav_processors(name='sox_resample')
class ResampleProcessor(BaseWavProcessor):
@property
def name(self):
return 'Resample'
def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
output_fn = self.output_fn(input_fn)
sr_file = librosa.core.get_samplerate(input_fn)
if sr != sr_file:
subprocess.check_call(f'sox -v 0.95 "{input_fn}" -r{sr} "{output_fn}"', shell=True)
y, _ = librosa.core.load(input_fn, sr=sr)
y, _ = librosa.effects.trim(y)
save_wav(y, output_fn, sr)
return output_fn, sr
else:
return input_fn, sr
@register_wav_processors(name='trim_sil')
class TrimSILProcessor(BaseWavProcessor):
@property
def name(self):
return 'TrimSIL'
def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
output_fn = self.output_fn(input_fn)
y, _ = librosa.core.load(input_fn, sr=sr)
y, _ = librosa.effects.trim(y)
save_wav(y, output_fn, sr)
        return output_fn, sr  # also return sr so BasePreprocessor.process_wav can unpack (fn, sr) like the other processors
@register_wav_processors(name='trim_all_sil')
class TrimAllSILProcessor(BaseWavProcessor):
@property
def name(self):
return 'TrimSIL'
def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
output_fn = self.output_fn(input_fn)
y, audio_mask, _ = trim_long_silences(
input_fn, vad_max_silence_length=preprocess_args.get('vad_max_silence_length', 12))
save_wav(y, output_fn, sr)
if preprocess_args['save_sil_mask']:
os.makedirs(f'{processed_dir}/sil_mask', exist_ok=True)
np.save(f'{processed_dir}/sil_mask/{item_name}.npy', audio_mask)
return output_fn, sr
@register_wav_processors(name='denoise')
class DenoiseProcessor(BaseWavProcessor):
@property
def name(self):
return 'Denoise'
def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
output_fn = self.output_fn(input_fn)
rnnoise(input_fn, output_fn, out_sample_rate=sr)
return output_fn, sr
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/wav_processors/common_processors.py |
from . import base_processor
from . import common_processors
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/wav_processors/__init__.py |
REGISTERED_WAV_PROCESSORS = {}
def register_wav_processors(name):
def _f(cls):
REGISTERED_WAV_PROCESSORS[name] = cls
return cls
return _f
def get_wav_processor_cls(name):
return REGISTERED_WAV_PROCESSORS.get(name, None)
class BaseWavProcessor:
@property
def name(self):
raise NotImplementedError
def output_fn(self, input_fn):
return f'{input_fn[:-4]}_{self.name}.wav'
def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
raise NotImplementedError
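# Registering a new processor follows the pattern used in common_processors.py. Sketch only: the
# 'peak_norm' name and the gain logic are hypothetical, and librosa / utils.audio.save_wav are
# assumed to be imported as they are in that module:
#   @register_wav_processors(name='peak_norm')
#   class PeakNormProcessor(BaseWavProcessor):
#       @property
#       def name(self):
#           return 'PeakNorm'
#       def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args):
#           output_fn = self.output_fn(input_fn)
#           y, _ = librosa.core.load(input_fn, sr=sr)
#           save_wav(y / max(1e-8, float(abs(y).max())), output_fn, sr)
#           return output_fn, sr
# The processors actually applied are selected by name via preprocess_args['wav_processors'].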
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/wav_processors/base_processor.py |
from . import en
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/__init__.py |
import re
import jieba
from pypinyin import pinyin, Style
from data_gen.tts.data_gen_utils import PUNCS
from data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor
from utils.text_norm import NSWNormalizer
class TxtProcessor(BaseTxtProcessor):
table = {ord(f): ord(t) for f, t in zip(
u':,。!?【】()%#@&1234567890',
u':,.!?[]()%#@&1234567890')}
@staticmethod
def preprocess_text(text):
text = text.translate(TxtProcessor.table)
text = NSWNormalizer(text).normalize(remove_punc=False)
text = re.sub("[\'\"()]+", "", text)
text = re.sub("[-]+", " ", text)
text = re.sub(f"[^ A-Za-z\u4e00-\u9fff{PUNCS}]", "", text)
text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> !
text = re.sub(f"([{PUNCS}])", r" \1 ", text)
text = re.sub(rf"\s+", r"", text)
text = re.sub(rf"[A-Za-z]+", r"$", text)
return text
@classmethod
def process(cls, txt, pre_align_args):
txt = cls.preprocess_text(txt)
shengmu = pinyin(txt, style=Style.INITIALS) # https://blog.csdn.net/zhoulei124/article/details/89055403
yunmu_finals = pinyin(txt, style=Style.FINALS)
yunmu_tone3 = pinyin(txt, style=Style.FINALS_TONE3)
yunmu = [[t[0] + '5'] if t[0] == f[0] else t for f, t in zip(yunmu_finals, yunmu_tone3)] \
if pre_align_args['use_tone'] else yunmu_finals
assert len(shengmu) == len(yunmu)
phs = ["|"]
for a, b, c in zip(shengmu, yunmu, yunmu_finals):
if a[0] == c[0]:
phs += [a[0], "|"]
else:
phs += [a[0], b[0], "|"]
return phs, txt
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/zh.py |
import re
import unicodedata
from g2p_en import G2p
from g2p_en.expand import normalize_numbers
from nltk import pos_tag
from nltk.tokenize import TweetTokenizer
from data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors
from data_gen.tts.data_gen_utils import is_sil_phoneme, PUNCS
class EnG2p(G2p):
word_tokenize = TweetTokenizer().tokenize
def __call__(self, text):
# preprocessing
words = EnG2p.word_tokenize(text)
tokens = pos_tag(words) # tuples of (word, tag)
# steps
prons = []
for word, pos in tokens:
if re.search("[a-z]", word) is None:
pron = [word]
elif word in self.homograph2features: # Check homograph
pron1, pron2, pos1 = self.homograph2features[word]
if pos.startswith(pos1):
pron = pron1
else:
pron = pron2
elif word in self.cmu: # lookup CMU dict
pron = self.cmu[word][0]
else: # predict for oov
pron = self.predict(word)
prons.extend(pron)
prons.extend([" "])
return prons[:-1]
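# Rough usage of EnG2p (the exact phones depend on the bundled CMU dictionary; the values shown
# here are illustrative):
#   EnG2p()("hello world")  ->  ['HH', 'AH0', 'L', 'OW1', ' ', 'W', 'ER1', 'L', 'D']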
@register_txt_processors('en')
class TxtProcessor(BaseTxtProcessor):
g2p = EnG2p()
@staticmethod
def preprocess_text(text):
text = normalize_numbers(text)
text = ''.join(char for char in unicodedata.normalize('NFD', text)
if unicodedata.category(char) != 'Mn') # Strip accents
text = text.lower()
text = re.sub("[\'\"()]+", "", text)
text = re.sub("[-]+", " ", text)
text = re.sub(f"[^ a-z{PUNCS}]", "", text)
text = re.sub(f" ?([{PUNCS}]) ?", r"\1", text) # !! -> !
text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> !
text = text.replace("i.e.", "that is")
text = text.replace("i.e.", "that is")
text = text.replace("etc.", "etc")
text = re.sub(f"([{PUNCS}])", r" \1 ", text)
text = re.sub(rf"\s+", r" ", text)
return text
@classmethod
def process(cls, txt, preprocess_args):
txt = cls.preprocess_text(txt).strip()
phs = cls.g2p(txt)
txt_struct = [[w, []] for w in txt.split(" ")]
i_word = 0
for p in phs:
if p == ' ':
i_word += 1
else:
txt_struct[i_word][1].append(p)
txt_struct = cls.postprocess(txt_struct, preprocess_args)
        return txt_struct, txt
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/en.py |
from data_gen.tts.data_gen_utils import is_sil_phoneme
REGISTERED_TEXT_PROCESSORS = {}
def register_txt_processors(name):
def _f(cls):
REGISTERED_TEXT_PROCESSORS[name] = cls
return cls
return _f
def get_txt_processor_cls(name):
return REGISTERED_TEXT_PROCESSORS.get(name, None)
class BaseTxtProcessor:
@staticmethod
def sp_phonemes():
return ['|']
@classmethod
def process(cls, txt, preprocess_args):
raise NotImplementedError
@classmethod
def postprocess(cls, txt_struct, preprocess_args):
# remove sil phoneme in head and tail
while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[0][0]):
txt_struct = txt_struct[1:]
while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[-1][0]):
txt_struct = txt_struct[:-1]
if preprocess_args['with_phsep']:
txt_struct = cls.add_bdr(txt_struct)
if preprocess_args['add_eos_bos']:
txt_struct = [["<BOS>", ["<BOS>"]]] + txt_struct + [["<EOS>", ["<EOS>"]]]
return txt_struct
@classmethod
def add_bdr(cls, txt_struct):
txt_struct_ = []
for i, ts in enumerate(txt_struct):
txt_struct_.append(ts)
if i != len(txt_struct) - 1 and \
not is_sil_phoneme(txt_struct[i][0]) and not is_sil_phoneme(txt_struct[i + 1][0]):
txt_struct_.append(['|', ['|']])
        return txt_struct_
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/base_text_processor.py |
import re
import jieba
from pypinyin import pinyin, Style
from data_gen.tts.data_gen_utils import PUNCS
from data_gen.tts.txt_processors import zh
from g2pM import G2pM
ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j',
'q', 'x', 'r', 'z', 'c', 's', 'y', 'w']
ALL_YUNMU = ['a', 'ai', 'an', 'ang', 'ao', 'e', 'ei', 'en', 'eng', 'er', 'i', 'ia', 'ian',
'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'iu', 'ng', 'o', 'ong', 'ou',
'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn']
class TxtProcessor(zh.TxtProcessor):
model = G2pM()
@staticmethod
def sp_phonemes():
return ['|', '#']
@classmethod
def process(cls, txt, pre_align_args):
txt = cls.preprocess_text(txt)
ph_list = cls.model(txt, tone=pre_align_args['use_tone'], char_split=True)
seg_list = '#'.join(jieba.cut(txt))
assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list)
        # insert word-boundary markers '#'
ph_list_ = []
seg_idx = 0
for p in ph_list:
p = p.replace("u:", "v")
if seg_list[seg_idx] == '#':
ph_list_.append('#')
seg_idx += 1
else:
ph_list_.append("|")
seg_idx += 1
if re.findall('[\u4e00-\u9fff]', p):
if pre_align_args['use_tone']:
p = pinyin(p, style=Style.TONE3, strict=True)[0][0]
if p[-1] not in ['1', '2', '3', '4', '5']:
p = p + '5'
else:
p = pinyin(p, style=Style.NORMAL, strict=True)[0][0]
finished = False
                if len([c for c in p if c.isalpha()]) > 1:  # more than one alphabetic char: try to split shengmu + yunmu
for shenmu in ALL_SHENMU:
if p.startswith(shenmu) and not p.lstrip(shenmu).isnumeric():
ph_list_ += [shenmu, p.lstrip(shenmu)]
finished = True
break
if not finished:
ph_list_.append(p)
ph_list = ph_list_
        # remove word-boundary markers around silence symbols, e.g. [..., '#', ',', '#', ...]
sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes()
ph_list_ = []
for i in range(0, len(ph_list), 1):
if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes):
ph_list_.append(ph_list[i])
ph_list = ph_list_
return ph_list, txt
if __name__ == '__main__':
phs, txt = TxtProcessor.process('他来到了,网易杭研大厦', {'use_tone': True})
print(phs)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/zh_g2pM.py |
## Mel-filterbank
mel_window_length = 25 # In milliseconds
mel_window_step = 10 # In milliseconds
mel_n_channels = 40
## Audio
sampling_rate = 16000
# Number of spectrogram frames in a partial utterance
partials_n_frames = 160 # 1600 ms
# Number of spectrogram frames at inference
inference_n_frames = 80 # 800 ms
## Voice Activation Detection
# Window size of the VAD. Must be either 10, 20 or 30 milliseconds.
# This sets the granularity of the VAD. Should not need to be changed.
vad_window_length = 30 # In milliseconds
# Number of frames to average together when performing the moving average smoothing.
# The larger this value, the larger the VAD variations must be to not get smoothed out.
vad_moving_average_width = 8
# Maximum number of consecutive silent frames a segment can have.
vad_max_silence_length = 6
## Audio volume normalization
audio_norm_target_dBFS = -30
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/emotion/params_data.py |
## Model parameters
model_hidden_size = 256
model_embedding_size = 256
model_num_layers = 3
## Training parameters
learning_rate_init = 1e-4
speakers_per_batch = 6
utterances_per_speaker = 20
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/emotion/params_model.py |
from data_gen.tts.emotion.params_model import *
from data_gen.tts.emotion.params_data import *
from torch.nn.utils import clip_grad_norm_
from scipy.optimize import brentq
from torch import nn
import numpy as np
import torch
class EmotionEncoder(nn.Module):
def __init__(self, device, loss_device):
super().__init__()
self.loss_device = loss_device
# Network defition
self.lstm = nn.LSTM(input_size=mel_n_channels,
hidden_size=model_hidden_size,
num_layers=model_num_layers,
batch_first=True).to(device)
self.linear = nn.Linear(in_features=model_hidden_size,
out_features=model_embedding_size).to(device)
self.relu = torch.nn.ReLU().to(device)
# Cosine similarity scaling (with fixed initial parameter values)
self.similarity_weight = nn.Parameter(torch.tensor([10.])).to(loss_device)
self.similarity_bias = nn.Parameter(torch.tensor([-5.])).to(loss_device)
# Loss
self.loss_fn = nn.CrossEntropyLoss().to(loss_device)
def do_gradient_ops(self):
# Gradient scale
self.similarity_weight.grad *= 0.01
self.similarity_bias.grad *= 0.01
# Gradient clipping
clip_grad_norm_(self.parameters(), 3, norm_type=2)
def forward(self, utterances, hidden_init=None):
"""
Computes the embeddings of a batch of utterance spectrograms.
:param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape
(batch_size, n_frames, n_channels)
:param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,
batch_size, hidden_size). Will default to a tensor of zeros if None.
:return: the embeddings as a tensor of shape (batch_size, embedding_size)
"""
# Pass the input through the LSTM layers and retrieve all outputs, the final hidden state
# and the final cell state.
out, (hidden, cell) = self.lstm(utterances, hidden_init)
# We take only the hidden state of the last layer
embeds_raw = self.relu(self.linear(hidden[-1]))
# L2-normalize it
embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
return embeds
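    # Shape sketch with the defaults from params_data / params_model (mel_n_channels = 40,
    # partials_n_frames = 160, model_embedding_size = 256):
    #   utterances: (batch_size, 160, 40)  ->  embeds: (batch_size, 256), with L2-normalised rows.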
def inference(self, utterances, hidden_init=None):
"""
Computes the embeddings of a batch of utterance spectrograms.
:param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape
(batch_size, n_frames, n_channels)
:param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,
batch_size, hidden_size). Will default to a tensor of zeros if None.
        :return: the final hidden state of the last LSTM layer, a tensor of shape (batch_size,
        hidden_size); unlike forward(), no linear projection or L2-normalization is applied here.
"""
# Pass the input through the LSTM layers and retrieve all outputs, the final hidden state
# and the final cell state.
out, (hidden, cell) = self.lstm(utterances, hidden_init)
        return hidden[-1]
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/emotion/model.py |
from data_gen.tts.emotion.params_data import *
from data_gen.tts.emotion.model import EmotionEncoder
from data_gen.tts.emotion.audio import preprocess_wav # We want to expose this function from here
from matplotlib import cm
from data_gen.tts.emotion import audio
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
_model = None # type: EmotionEncoder
_device = None # type: torch.device
def load_model(weights_fpath: Path, device=None):
"""
    Loads the model in memory. If this function is not explicitly called, it will be run on the
first call to embed_frames() with the default weights file.
:param weights_fpath: the path to saved model weights.
:param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The
model will be loaded and will run on this device. Outputs will however always be on the cpu.
    If None, will default to your GPU if it's available, otherwise your CPU.
"""
# TODO: I think the slow loading of the encoder might have something to do with the device it
# was saved on. Worth investigating.
global _model, _device
if device is None:
_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
elif isinstance(device, str):
_device = torch.device(device)
_model = EmotionEncoder(_device, torch.device("cpu"))
checkpoint = torch.load(weights_fpath)
_model.load_state_dict(checkpoint["model_state"])
_model.eval()
print("Loaded encoder trained to step %d" % (checkpoint["step"]))
def is_loaded():
return _model is not None
def embed_frames_batch(frames_batch):
"""
Computes embeddings for a batch of mel spectrogram.
    :param frames_batch: a batch of mel spectrograms as a numpy array of float32 of shape
(batch_size, n_frames, n_channels)
:return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size)
"""
if _model is None:
raise Exception("Model was not loaded. Call load_model() before inference.")
frames = torch.from_numpy(frames_batch).to(_device)
embed = _model.inference(frames).detach().cpu().numpy()
return embed
def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames,
min_pad_coverage=0.75, overlap=0.5):
"""
Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain
partial utterances of <partial_utterance_n_frames> each. Both the waveform and the mel
spectrogram slices are returned, so as to make each partial utterance waveform correspond to
its spectrogram. This function assumes that the mel spectrogram parameters used are those
defined in params_data.py.
The returned ranges may be indexing further than the length of the waveform. It is
recommended that you pad the waveform with zeros up to wave_slices[-1].stop.
:param n_samples: the number of samples in the waveform
:param partial_utterance_n_frames: the number of mel spectrogram frames in each partial
utterance
:param min_pad_coverage: when reaching the last partial utterance, it may or may not have
enough frames. If at least <min_pad_coverage> of <partial_utterance_n_frames> are present,
then the last partial utterance will be considered, as if we padded the audio. Otherwise,
it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial
utterance, this parameter is ignored so that the function always returns at least 1 slice.
:param overlap: by how much the partial utterance should overlap. If set to 0, the partial
utterances are entirely disjoint.
:return: the waveform slices and mel spectrogram slices as lists of array slices. Index
respectively the waveform and the mel spectrogram with these slices to obtain the partial
utterances.
"""
assert 0 <= overlap < 1
assert 0 < min_pad_coverage <= 1
samples_per_frame = int((sampling_rate * mel_window_step / 1000))
n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)
# Compute the slices
wav_slices, mel_slices = [], []
steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1)
for i in range(0, steps, frame_step):
mel_range = np.array([i, i + partial_utterance_n_frames])
wav_range = mel_range * samples_per_frame
mel_slices.append(slice(*mel_range))
wav_slices.append(slice(*wav_range))
# Evaluate whether extra padding is warranted or not
last_wav_range = wav_slices[-1]
coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
if coverage < min_pad_coverage and len(mel_slices) > 1:
mel_slices = mel_slices[:-1]
wav_slices = wav_slices[:-1]
return wav_slices, mel_slices
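# Worked example with the defaults above, for a hypothetical 3 s clip at 16 kHz (n_samples = 48000):
# samples_per_frame = 160, n_frames = 301, frame_step = 80, giving mel slices [0:160], [80:240],
# [160:320] and wav slices [0:25600], [12800:38400], [25600:51200]. The last slice covers
# (48000 - 25600) / 25600 = 0.875 >= 0.75 of a partial, so it is kept, and embed_utterance() pads
# the waveform up to 51200 samples.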
def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs):
"""
Computes an embedding for a single utterance.
# TODO: handle multiple wavs to benefit from batching on GPU
:param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32
:param using_partials: if True, then the utterance is split in partial utterances of
<partial_utterance_n_frames> frames and the utterance embedding is computed from their
normalized average. If False, the utterance is instead computed from feeding the entire
    spectrogram to the network.
:param return_partials: if True, the partial embeddings will also be returned along with the
wav slices that correspond to the partial embeddings.
:param kwargs: additional arguments to compute_partial_splits()
:return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If
<return_partials> is True, the partial utterances as a numpy array of float32 of shape
(n_partials, model_embedding_size) and the wav partials as a list of slices will also be
returned. If <using_partials> is simultaneously set to False, both these values will be None
instead.
"""
# Process the entire utterance if not using partials
if not using_partials:
frames = audio.wav_to_mel_spectrogram(wav)
embed = embed_frames_batch(frames[None, ...])[0]
if return_partials:
return embed, None, None
return embed
# Compute where to split the utterance into partials and pad if necessary
wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs)
max_wave_length = wave_slices[-1].stop
if max_wave_length >= len(wav):
wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
# Split the utterance into partials
frames = audio.wav_to_mel_spectrogram(wav)
frames_batch = np.array([frames[s] for s in mel_slices])
partial_embeds = embed_frames_batch(frames_batch)
# Compute the utterance embedding from the partial embeddings
raw_embed = np.mean(partial_embeds, axis=0)
embed = raw_embed / np.linalg.norm(raw_embed, 2)
if return_partials:
return embed, partial_embeds, wave_slices
return embed
def embed_speaker(wavs, **kwargs):
    raise NotImplementedError()
def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)):
if ax is None:
ax = plt.gca()
if shape is None:
height = int(np.sqrt(len(embed)))
shape = (height, -1)
embed = embed.reshape(shape)
cmap = cm.get_cmap()
mappable = ax.imshow(embed, cmap=cmap)
cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04)
cbar.set_clim(*color_range)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title(title)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/emotion/inference.py |
from scipy.ndimage import binary_dilation
from data_gen.tts.emotion.params_data import *
from pathlib import Path
from typing import Optional, Union
import numpy as np
import webrtcvad
import librosa
import struct
int16_max = (2 ** 15) - 1
def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray],
source_sr: Optional[int] = None):
"""
Applies the preprocessing operations used in training the Speaker Encoder to a waveform
either on disk or in memory. The waveform will be resampled to match the data hyperparameters.
:param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not
just .wav), either the waveform as a numpy array of floats.
:param source_sr: if passing an audio waveform, the sampling rate of the waveform before
preprocessing. After preprocessing, the waveform's sampling rate will match the data
hyperparameters. If passing a filepath, the sampling rate will be automatically detected and
this argument will be ignored.
"""
# Load the wav from disk if needed
if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):
wav, source_sr = librosa.load(str(fpath_or_wav), sr=None)
else:
wav = fpath_or_wav
# Resample the wav if needed
if source_sr is not None and source_sr != sampling_rate:
        wav = librosa.resample(wav, orig_sr=source_sr, target_sr=sampling_rate)
# Apply the preprocessing: normalize volume and shorten long silences
wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)
wav = trim_long_silences(wav)
return wav
def wav_to_mel_spectrogram(wav):
"""
Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform.
    Note: this is not a log-mel spectrogram.
"""
    frames = librosa.feature.melspectrogram(
        y=wav,
        sr=sampling_rate,
        n_fft=int(sampling_rate * mel_window_length / 1000),
        hop_length=int(sampling_rate * mel_window_step / 1000),
        n_mels=mel_n_channels
    )
return frames.astype(np.float32).T
def trim_long_silences(wav):
"""
Ensures that segments without voice in the waveform remain no longer than a
threshold determined by the VAD parameters in params.py.
:param wav: the raw waveform as a numpy array of floats
:return: the same waveform with silences trimmed away (length <= original wav length)
"""
# Compute the voice detection window size
samples_per_window = (vad_window_length * sampling_rate) // 1000
# Trim the end of the audio to have a multiple of the window size
wav = wav[:len(wav) - (len(wav) % samples_per_window)]
# Convert the float waveform to 16-bit mono PCM
pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
# Perform voice activation detection
voice_flags = []
vad = webrtcvad.Vad(mode=3)
for window_start in range(0, len(wav), samples_per_window):
window_end = window_start + samples_per_window
voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
sample_rate=sampling_rate))
voice_flags = np.array(voice_flags)
# Smooth the voice detection with a moving average
def moving_average(array, width):
array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
ret = np.cumsum(array_padded, dtype=float)
ret[width:] = ret[width:] - ret[:-width]
return ret[width - 1:] / width
audio_mask = moving_average(voice_flags, vad_moving_average_width)
    audio_mask = np.round(audio_mask).astype(bool)
# Dilate the voiced regions
audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
audio_mask = np.repeat(audio_mask, samples_per_window)
return wav[audio_mask == True]
def normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False):
if increase_only and decrease_only:
raise ValueError("Both increase only and decrease only are set")
dBFS_change = target_dBFS - 10 * np.log10(np.mean(wav ** 2))
if (dBFS_change < 0 and increase_only) or (dBFS_change > 0 and decrease_only):
return wav
return wav * (10 ** (dBFS_change / 20))
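# Quick sanity check of the gain math (hypothetical numbers): if mean(wav ** 2) == 1e-4, the current
# level is 10 * log10(1e-4) = -40 dBFS; with target_dBFS = -30 (audio_norm_target_dBFS in
# params_data.py) the change is +10 dB, i.e. a gain of 10 ** (10 / 20) ~= 3.16.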
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/emotion/audio.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import logging
import math
import numpy, pdb, random
import time, os, itertools, shutil, importlib
import argparse
import sys
import glob
from sklearn import metrics
import soundfile as sf
#import sentencepiece as spm
import torch
import inference as encoder
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from resemblyzer import VoiceEncoder, preprocess_wav
def tuneThresholdfromScore(scores, labels, target_fa, target_fr=None):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1)
fnr = 1 - tpr
fnr = fnr * 100
fpr = fpr * 100
tunedThreshold = [];
if target_fr:
for tfr in target_fr:
idx = numpy.nanargmin(numpy.absolute((tfr - fnr)))
tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]);
for tfa in target_fa:
idx = numpy.nanargmin(numpy.absolute((tfa - fpr))) # numpy.where(fpr<=tfa)[0][-1]
tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]);
idxE = numpy.nanargmin(numpy.absolute((fnr - fpr)))
eer = max(fpr[idxE], fnr[idxE])
return (tunedThreshold, eer, fpr, fnr);
def loadWAV(filename, max_frames, evalmode=True, num_eval=10):
# Maximum audio length
max_audio = max_frames * 160 + 240
# Read wav file and convert to torch tensor
audio,sample_rate = sf.read(filename)
feats_v0 = torch.from_numpy(audio).float()
audiosize = audio.shape[0]
if audiosize <= max_audio:
shortage = math.floor((max_audio - audiosize + 1) / 2)
audio = numpy.pad(audio, (shortage, shortage), 'constant', constant_values=0)
audiosize = audio.shape[0]
if evalmode:
startframe = numpy.linspace(0, audiosize - max_audio, num=num_eval)
else:
startframe = numpy.array([numpy.int64(random.random() * (audiosize - max_audio))])
feats = []
if evalmode and max_frames == 0:
feats.append(audio)
else:
for asf in startframe:
feats.append(audio[int(asf):int(asf) + max_audio])
feat = numpy.stack(feats, axis=0)
feat = torch.FloatTensor(feat)
return feat;
def evaluateFromList(listfilename, print_interval=100, test_path='', multi=False):
lines = []
files = []
feats = {}
tstart = time.time()
## Read all lines
with open(listfilename) as listfile:
while True:
line = listfile.readline();
if (not line):
break;
data = line.split();
## Append random label if missing
if len(data) == 2: data = [random.randint(0,1)] + data
files.append(data[1])
files.append(data[2])
lines.append(line)
setfiles = list(set(files))
setfiles.sort()
## Save all features to file
for idx, file in enumerate(setfiles):
# preprocessed_wav = encoder.preprocess_wav(os.path.join(test_path,file))
# embed = encoder.embed_utterance(preprocessed_wav)
processed_wav = preprocess_wav(os.path.join(test_path,file))
embed = voice_encoder.embed_utterance(processed_wav)
torch.cuda.empty_cache()
ref_feat = torch.from_numpy(embed).unsqueeze(0)
feats[file] = ref_feat
telapsed = time.time() - tstart
if idx % print_interval == 0:
sys.stdout.write("\rReading %d of %d: %.2f Hz, embedding size %d"%(idx,len(setfiles),idx/telapsed,ref_feat.size()[1]));
print('')
all_scores = [];
all_labels = [];
all_trials = [];
tstart = time.time()
## Read files and compute all scores
for idx, line in enumerate(lines):
data = line.split();
## Append random label if missing
if len(data) == 2: data = [random.randint(0,1)] + data
ref_feat = feats[data[1]]
com_feat = feats[data[2]]
ref_feat = ref_feat.cuda()
com_feat = com_feat.cuda()
# normalize feats
ref_feat = F.normalize(ref_feat, p=2, dim=1)
com_feat = F.normalize(com_feat, p=2, dim=1)
dist = F.pairwise_distance(ref_feat.unsqueeze(-1), com_feat.unsqueeze(-1)).detach().cpu().numpy();
score = -1 * numpy.mean(dist);
all_scores.append(score);
all_labels.append(int(data[0]));
all_trials.append(data[1]+" "+data[2])
if idx % print_interval == 0:
telapsed = time.time() - tstart
sys.stdout.write("\rComputing %d of %d: %.2f Hz"%(idx,len(lines),idx/telapsed));
sys.stdout.flush();
print('\n')
return (all_scores, all_labels, all_trials);
if __name__ == '__main__':
parser = argparse.ArgumentParser("baseline")
parser.add_argument("--data_root", type=str, help="", required=True)
parser.add_argument("--list", type=str, help="", required=True)
parser.add_argument("--model_dir", type=str, help="model parameters for AudioEncoder", required=True)
args = parser.parse_args()
# Load the models one by one.
print("Preparing the encoder...")
# encoder.load_model(Path(args.model_dir))
print("Insert the wav file name...")
voice_encoder = VoiceEncoder().cuda()
sc, lab, trials = evaluateFromList(args.list, print_interval=100, test_path=args.data_root)
    result = tuneThresholdfromScore(sc, lab, [1, 0.1])
    print('EER %2.4f' % result[1])
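    # Example invocation (all paths are placeholders):
    #   python test_emotion.py --data_root /path/to/wavs --list /path/to/trials.txt --model_dir /path/to/ckpt
    # where each line of trials.txt is "<label> <wav1> <wav2>" (the label may be omitted).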
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/data_gen/tts/emotion/test_emotion.py |
import os
import subprocess
def link_file(from_file, to_file):
subprocess.check_call(
f'ln -s "`realpath --relative-to="{os.path.dirname(to_file)}" "{from_file}"`" "{to_file}"', shell=True)
def move_file(from_file, to_file):
subprocess.check_call(f'mv "{from_file}" "{to_file}"', shell=True)
def copy_file(from_file, to_file):
subprocess.check_call(f'cp -r "{from_file}" "{to_file}"', shell=True)
def remove_file(*fns):
for f in fns:
subprocess.check_call(f'rm -rf "{f}"', shell=True) | EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/os_utils.py |
import matplotlib.pyplot as plt
import numpy as np
import torch
LINE_COLORS = ['w', 'r', 'y', 'cyan', 'm', 'b', 'lime']
def spec_to_figure(spec, vmin=None, vmax=None):
if isinstance(spec, torch.Tensor):
spec = spec.cpu().numpy()
fig = plt.figure(figsize=(12, 6))
plt.pcolor(spec.T, vmin=vmin, vmax=vmax)
return fig
def spec_f0_to_figure(spec, f0s, figsize=None):
max_y = spec.shape[1]
if isinstance(spec, torch.Tensor):
spec = spec.detach().cpu().numpy()
f0s = {k: f0.detach().cpu().numpy() for k, f0 in f0s.items()}
f0s = {k: f0 / 10 for k, f0 in f0s.items()}
fig = plt.figure(figsize=(12, 6) if figsize is None else figsize)
plt.pcolor(spec.T)
for i, (k, f0) in enumerate(f0s.items()):
plt.plot(f0.clip(0, max_y), label=k, c=LINE_COLORS[i], linewidth=1, alpha=0.8)
plt.legend()
return fig
def dur_to_figure(dur_gt, dur_pred, txt):
dur_gt = dur_gt.long().cpu().numpy()
dur_pred = dur_pred.long().cpu().numpy()
dur_gt = np.cumsum(dur_gt)
dur_pred = np.cumsum(dur_pred)
fig = plt.figure(figsize=(12, 6))
for i in range(len(dur_gt)):
shift = (i % 8) + 1
plt.text(dur_gt[i], shift, txt[i])
plt.text(dur_pred[i], 10 + shift, txt[i])
plt.vlines(dur_gt[i], 0, 10, colors='b') # blue is gt
plt.vlines(dur_pred[i], 10, 20, colors='r') # red is pred
return fig
def f0_to_figure(f0_gt, f0_cwt=None, f0_pred=None):
fig = plt.figure()
f0_gt = f0_gt.cpu().numpy()
plt.plot(f0_gt, color='r', label='gt')
if f0_cwt is not None:
f0_cwt = f0_cwt.cpu().numpy()
plt.plot(f0_cwt, color='b', label='cwt')
if f0_pred is not None:
f0_pred = f0_pred.cpu().numpy()
plt.plot(f0_pred, color='green', label='pred')
plt.legend()
return fig
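# Usage sketch (illustrative; `writer`, `mel` and `step` are placeholders): these figures are
# typically logged to TensorBoard rather than shown interactively.
#
#     fig = spec_to_figure(mel, vmin=-6, vmax=1.5)
#     writer.add_figure('mel', fig, global_step=step)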
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/plot.py |
import argparse
import os
import yaml
from utils.os_utils import remove_file  # used by the --remove branch in set_hparams (import path assumed from this repo's layout)
global_print_hparams = True
hparams = {}
class Args:
def __init__(self, **kwargs):
for k, v in kwargs.items():
self.__setattr__(k, v)
def override_config(old_config: dict, new_config: dict):
for k, v in new_config.items():
if isinstance(v, dict) and k in old_config:
override_config(old_config[k], new_config[k])
else:
old_config[k] = v
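# Behaviour sketch for override_config (values are arbitrary): nested dicts are merged key by
# key, anything else is overwritten in place.
#
#     old = {'a': 1, 'b': {'c': 2, 'd': 3}}
#     override_config(old, {'b': {'c': 20}, 'e': 5})
#     # old -> {'a': 1, 'b': {'c': 20, 'd': 3}, 'e': 5}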
def set_hparams(config='', exp_name='', hparams_str='', print_hparams=True, global_hparams=True):
if config == '' and exp_name == '':
parser = argparse.ArgumentParser(description='')
        parser.add_argument('--config', type=str, default='',
                            help='path to the yaml config file')
        parser.add_argument('--exp_name', type=str, default='', help='exp_name')
        parser.add_argument('-hp', '--hparams', type=str, default='',
                            help='comma-separated hparam overrides, e.g. "a=1,b.c=2"')
parser.add_argument('--infer', action='store_true', help='infer')
parser.add_argument('--validate', action='store_true', help='validate')
parser.add_argument('--reset', action='store_true', help='reset hparams')
parser.add_argument('--remove', action='store_true', help='remove old ckpt')
parser.add_argument('--debug', action='store_true', help='debug')
args, unknown = parser.parse_known_args()
print("| Unknow hparams: ", unknown)
else:
args = Args(config=config, exp_name=exp_name, hparams=hparams_str,
infer=False, validate=False, reset=False, debug=False, remove=False)
global hparams
assert args.config != '' or args.exp_name != ''
if args.config != '':
assert os.path.exists(args.config)
config_chains = []
loaded_config = set()
def load_config(config_fn):
# deep first inheritance and avoid the second visit of one node
if not os.path.exists(config_fn):
return {}
with open(config_fn) as f:
hparams_ = yaml.safe_load(f)
loaded_config.add(config_fn)
if 'base_config' in hparams_:
ret_hparams = {}
if not isinstance(hparams_['base_config'], list):
hparams_['base_config'] = [hparams_['base_config']]
for c in hparams_['base_config']:
if c.startswith('.'):
c = f'{os.path.dirname(config_fn)}/{c}'
c = os.path.normpath(c)
if c not in loaded_config:
override_config(ret_hparams, load_config(c))
override_config(ret_hparams, hparams_)
else:
ret_hparams = hparams_
config_chains.append(config_fn)
return ret_hparams
saved_hparams = {}
args_work_dir = ''
if args.exp_name != '':
args_work_dir = f'{args.exp_name}' # modified
ckpt_config_path = f'{args_work_dir}/config.yaml'
if os.path.exists(ckpt_config_path):
with open(ckpt_config_path) as f:
saved_hparams_ = yaml.safe_load(f)
if saved_hparams_ is not None:
saved_hparams.update(saved_hparams_)
hparams_ = {}
if args.config != '':
hparams_.update(load_config(args.config))
if not args.reset:
hparams_.update(saved_hparams)
hparams_['work_dir'] = args_work_dir
    # Support overriding config values from the command line, including list-typed values.
    # Examples: --hparams="a=1,b.c=2,d=[1 1 1]"
if args.hparams != "":
for new_hparam in args.hparams.split(","):
k, v = new_hparam.split("=")
v = v.strip("\'\" ")
config_node = hparams_
for k_ in k.split(".")[:-1]:
config_node = config_node[k_]
k = k.split(".")[-1]
if v in ['True', 'False'] or type(config_node[k]) in [bool, list, dict]:
if type(config_node[k]) == list:
v = v.replace(" ", ",")
config_node[k] = eval(v)
else:
config_node[k] = type(config_node[k])(v)
if args_work_dir != '' and args.remove:
answer = input("REMOVE old checkpoint? Y/N [Default: N]: ")
if answer.lower() == "y":
remove_file(args_work_dir)
if args_work_dir != '' and (not os.path.exists(ckpt_config_path) or args.reset) and not args.infer:
os.makedirs(hparams_['work_dir'], exist_ok=True)
with open(ckpt_config_path, 'w') as f:
yaml.safe_dump(hparams_, f)
hparams_['infer'] = args.infer
hparams_['debug'] = args.debug
hparams_['validate'] = args.validate
hparams_['exp_name'] = args.exp_name
global global_print_hparams
if global_hparams:
hparams.clear()
hparams.update(hparams_)
if print_hparams and global_print_hparams and global_hparams:
print('| Hparams chains: ', config_chains)
print('| Hparams: ')
for i, (k, v) in enumerate(sorted(hparams_.items())):
print(f"\033[;33;m{k}\033[0m: {v}, ", end="\n" if i % 5 == 4 else "")
print("")
global_print_hparams = False
return hparams_ | EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/hparams.py |
import librosa
import numpy as np
from pycwt import wavelet
from scipy.interpolate import interp1d
def load_wav(wav_file, sr):
wav, _ = librosa.load(wav_file, sr=sr, mono=True)
return wav
def convert_continuos_f0(f0):
    '''CONVERT F0 TO CONTINUOUS F0
    Args:
        f0 (ndarray): original f0 sequence with the shape (T,)
    Return:
        (ndarray, ndarray): voiced mask (1 where f0 != 0) and continuous f0, both with the shape (T,)
    '''
# get uv information as binary
f0 = np.copy(f0)
uv = np.float32(f0 != 0)
# get start and end of f0
if (f0 == 0).all():
print("| all of the f0 values are 0.")
return uv, f0
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
# padding start and end of f0 sequence
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nz_frames = np.where(f0 != 0)[0]
# perform linear interpolation
f = interp1d(nz_frames, f0[nz_frames])
cont_f0 = f(np.arange(0, f0.shape[0]))
return uv, cont_f0
def get_cont_lf0(f0, frame_period=5.0):
uv, cont_f0_lpf = convert_continuos_f0(f0)
# cont_f0_lpf = low_pass_filter(cont_f0_lpf, int(1.0 / (frame_period * 0.001)), cutoff=20)
cont_lf0_lpf = np.log(cont_f0_lpf)
return uv, cont_lf0_lpf
def get_lf0_cwt(lf0):
    '''
    input:
        signal of shape (N,)
    output:
        Wavelet_lf0 of shape (N, 10), scales of shape (10,)
    '''
mother = wavelet.MexicanHat()
dt = 0.005
dj = 1
s0 = dt * 2
J = 9
Wavelet_lf0, scales, _, _, _, _ = wavelet.cwt(np.squeeze(lf0), dt, dj, s0, J, mother)
# Wavelet.shape => (J + 1, len(lf0))
Wavelet_lf0 = np.real(Wavelet_lf0).T
return Wavelet_lf0, scales
def norm_scale(Wavelet_lf0):
Wavelet_lf0_norm = np.zeros((Wavelet_lf0.shape[0], Wavelet_lf0.shape[1]))
mean = Wavelet_lf0.mean(0)[None, :]
std = Wavelet_lf0.std(0)[None, :]
Wavelet_lf0_norm = (Wavelet_lf0 - mean) / std
return Wavelet_lf0_norm, mean, std
def normalize_cwt_lf0(f0, mean, std):
uv, cont_lf0_lpf = get_cont_lf0(f0)
cont_lf0_norm = (cont_lf0_lpf - mean) / std
Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_norm)
Wavelet_lf0_norm, _, _ = norm_scale(Wavelet_lf0)
return Wavelet_lf0_norm
def get_lf0_cwt_norm(f0s, mean, std):
uvs = list()
cont_lf0_lpfs = list()
cont_lf0_lpf_norms = list()
Wavelet_lf0s = list()
Wavelet_lf0s_norm = list()
scaless = list()
means = list()
stds = list()
for f0 in f0s:
uv, cont_lf0_lpf = get_cont_lf0(f0)
cont_lf0_lpf_norm = (cont_lf0_lpf - mean) / std
Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm) # [560,10]
Wavelet_lf0_norm, mean_scale, std_scale = norm_scale(Wavelet_lf0) # [560,10],[1,10],[1,10]
Wavelet_lf0s_norm.append(Wavelet_lf0_norm)
uvs.append(uv)
cont_lf0_lpfs.append(cont_lf0_lpf)
cont_lf0_lpf_norms.append(cont_lf0_lpf_norm)
Wavelet_lf0s.append(Wavelet_lf0)
scaless.append(scales)
means.append(mean_scale)
stds.append(std_scale)
return Wavelet_lf0s_norm, scaless, means, stds
def inverse_cwt_torch(Wavelet_lf0, scales):
import torch
b = ((torch.arange(0, len(scales)).float().to(Wavelet_lf0.device)[None, None, :] + 1 + 2.5) ** (-2.5))
lf0_rec = Wavelet_lf0 * b
lf0_rec_sum = lf0_rec.sum(-1)
lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdim=True)) / lf0_rec_sum.std(-1, keepdim=True)
return lf0_rec_sum
def inverse_cwt(Wavelet_lf0, scales):
b = ((np.arange(0, len(scales))[None, None, :] + 1 + 2.5) ** (-2.5))
lf0_rec = Wavelet_lf0 * b
lf0_rec_sum = lf0_rec.sum(-1)
lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdims=True)) / lf0_rec_sum.std(-1, keepdims=True)
return lf0_rec_sum
def cwt2f0(cwt_spec, mean, std, cwt_scales):
assert len(mean.shape) == 1 and len(std.shape) == 1 and len(cwt_spec.shape) == 3
import torch
if isinstance(cwt_spec, torch.Tensor):
f0 = inverse_cwt_torch(cwt_spec, cwt_scales)
f0 = f0 * std[:, None] + mean[:, None]
f0 = f0.exp() # [B, T]
else:
f0 = inverse_cwt(cwt_spec, cwt_scales)
f0 = f0 * std[:, None] + mean[:, None]
f0 = np.exp(f0) # [B, T]
return f0
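# Pipeline sketch (shapes are illustrative; `f0` is a placeholder 1-D array of per-frame f0 in Hz):
# the contour is made continuous in the log domain, decomposed into 10 wavelet components, and
# approximately reconstructed by cwt2f0 using the log-f0 mean/std.
#
#     uv, cont_lf0 = get_cont_lf0(f0)                                      # (T,), (T,)
#     lf0_mean, lf0_std = cont_lf0.mean(), cont_lf0.std()
#     Wavelet_lf0, scales = get_lf0_cwt((cont_lf0 - lf0_mean) / lf0_std)   # (T, 10), (10,)
#     f0_rec = cwt2f0(Wavelet_lf0[None], np.array([lf0_mean]), np.array([lf0_std]), scales)  # (1, T), approximate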
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/cwt.py |
import matplotlib
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
matplotlib.use('Agg')
import glob
import itertools
import subprocess
import threading
import traceback
from pytorch_lightning.callbacks import GradientAccumulationScheduler
from pytorch_lightning.callbacks import ModelCheckpoint
from functools import wraps
from torch.cuda._utils import _get_device_index
import numpy as np
import torch.optim
import torch.utils.data
import copy
import logging
import os
import re
import sys
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
from torch.optim.optimizer import Optimizer
def get_a_var(obj): # pragma: no cover
if isinstance(obj, torch.Tensor):
return obj
if isinstance(obj, list) or isinstance(obj, tuple):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def data_loader(fn):
"""
    Decorator that lazily evaluates a dataloader-returning method and memoizes the result.
    :param fn: the decorated *_dataloader method
    :return: a wrapped getter that caches the dataloader on first access
"""
wraps(fn)
attr_name = '_lazy_' + fn.__name__
def _get_data_loader(self):
try:
value = getattr(self, attr_name)
except AttributeError:
try:
value = fn(self) # Lazy evaluation, done only once.
if (
value is not None and
not isinstance(value, list) and
fn.__name__ in ['test_dataloader', 'val_dataloader']
):
value = [value]
except AttributeError as e:
# Guard against AttributeError suppression. (Issue #142)
traceback.print_exc()
error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e)
raise RuntimeError(error) from e
setattr(self, attr_name, value) # Memoize evaluation.
return value
return _get_data_loader
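# Usage sketch (hypothetical task class): the decorator memoizes the wrapped method, and a single
# val/test DataLoader is wrapped into a list, which is the shape BaseTrainer below expects.
#
#     class MyTask(BaseModel):                      # BaseModel is a placeholder base class
#         @data_loader
#         def val_dataloader(self):
#             return torch.utils.data.DataLoader(self.valid_dataset, batch_size=1)
#     # task.val_dataloader() -> [DataLoader], cached as task._lazy_val_dataloader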
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None): # pragma: no cover
r"""Applies each `module` in :attr:`modules` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.
Args:
modules (Module): modules to be parallelized
inputs (tensor): inputs to the modules
devices (list of int or torch.device): CUDA devices
:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
:attr:`devices` (if given) should all have same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(modules) == len(inputs)
if kwargs_tup is not None:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
devices = list(map(lambda x: _get_device_index(x, True), devices))
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, kwargs, device=None):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
# ---------------
# CHANGE
if module.training:
output = module.training_step(*input, **kwargs)
elif module.testing:
output = module.test_step(*input, **kwargs)
else:
output = module.validation_step(*input, **kwargs)
# ---------------
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
# make sure each module knows what training state it's in...
# fixes weird bug where copies are out of sync
root_m = modules[0]
for m in modules[1:]:
m.training = root_m.training
m.testing = root_m.testing
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, kwargs, device))
for i, (module, input, kwargs, device) in
enumerate(zip(modules, inputs, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
def _find_tensors(obj): # pragma: no cover
r"""
Recursively find all tensors contained in the specified object.
"""
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
class DDP(DistributedDataParallel):
"""
Override the forward call in lightning so it goes to training and validation step respectively
"""
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
def forward(self, *inputs, **kwargs): # pragma: no cover
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
# --------------
# LIGHTNING MOD
# --------------
# normal
# output = self.module(*inputs[0], **kwargs[0])
# lightning
if self.module.training:
output = self.module.training_step(*inputs[0], **kwargs[0])
elif self.module.testing:
output = self.module.test_step(*inputs[0], **kwargs[0])
else:
output = self.module.validation_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
# normal
output = self.module(*inputs, **kwargs)
if torch.is_grad_enabled():
# We'll return the output object verbatim since it is a freeform
# object. We need to find any tensors in this object, though,
# because we need to figure out which parameters were used during
# this forward pass, to ensure we short circuit reduction for any
# unused parameters. Only if `find_unused_parameters` is set.
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
return output
class DP(DataParallel):
"""
Override the forward call in lightning so it goes to training and validation step respectively
"""
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
for t in itertools.chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(self.src_device_obj, t.device))
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
# lightning
if self.module.training:
return self.module.training_step(*inputs[0], **kwargs[0])
elif self.module.testing:
return self.module.test_step(*inputs[0], **kwargs[0])
else:
return self.module.validation_step(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
class GradientAccumulationScheduler:
def __init__(self, scheduling: dict):
        if scheduling == {}:  # empty dict error
            raise TypeError("Empty dict cannot be interpreted correctly")
        for key in scheduling.keys():
            if not isinstance(key, int) or not isinstance(scheduling[key], int):
                raise TypeError("All epochs and accumulation factors must be integers")
        minimal_epoch = min(scheduling.keys())
        if minimal_epoch < 1:
            msg = f"Epochs are indexed from 1, so epoch {minimal_epoch} cannot be interpreted correctly"
            raise IndexError(msg)
        elif minimal_epoch != 1:  # if the user didn't define an accumulation factor for the first epoch
            scheduling.update({1: 1})
self.scheduling = scheduling
self.epochs = sorted(scheduling.keys())
def on_epoch_begin(self, epoch, trainer):
epoch += 1 # indexing epochs from 1
for i in reversed(range(len(self.epochs))):
if epoch >= self.epochs[i]:
trainer.accumulate_grad_batches = self.scheduling.get(self.epochs[i])
break
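# Scheduling sketch (numbers are arbitrary): the dict maps a 1-indexed epoch to the number of
# batches whose gradients are accumulated before each optimizer step from that epoch onwards.
#
#     accumulation_scheduler = GradientAccumulationScheduler({1: 1, 5: 2, 10: 4})
#     # epochs 1-4 -> accumulate 1 batch, epochs 5-9 -> 2 batches, epoch 10 onwards -> 4 batches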
class LatestModelCheckpoint(ModelCheckpoint):
def __init__(self, filepath, monitor='val_loss', verbose=0, num_ckpt_keep=5,
save_weights_only=False, mode='auto', period=1, prefix='model', save_best=True):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
os.makedirs(filepath, exist_ok=True)
self.num_ckpt_keep = num_ckpt_keep
self.save_best = save_best
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_check = 0
self.prefix = prefix
self.best_k_models = {}
# {filename: monitor}
self.kth_best_model = ''
self.save_top_k = 1
self.task = None
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
self.mode = 'min'
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
self.mode = 'max'
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
self.mode = 'max'
else:
self.monitor_op = np.less
self.best = np.Inf
self.mode = 'min'
if os.path.exists(f'{self.filepath}/best_valid.npy'):
self.best = np.load(f'{self.filepath}/best_valid.npy')[0]
def get_all_ckpts(self):
        return sorted(glob.glob(f'{self.filepath}/{self.prefix}_ckpt_steps_*.ckpt'),
                      key=lambda x: -int(re.findall(r'.*steps_(\d+)\.ckpt', x)[0]))
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_check += 1
best_filepath = f'{self.filepath}/{self.prefix}_ckpt_best.pt'
if self.epochs_since_last_check >= self.period:
self.epochs_since_last_check = 0
filepath = f'{self.filepath}/{self.prefix}_ckpt_steps_{self.task.global_step}.ckpt'
if self.verbose > 0:
logging.info(f'Epoch {epoch:05d}@{self.task.global_step}: saving model to {filepath}')
self._save_model(filepath)
for old_ckpt in self.get_all_ckpts()[self.num_ckpt_keep:]:
subprocess.check_call(f'rm -rf "{old_ckpt}"', shell=True)
if self.verbose > 0:
logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}')
current = logs.get(self.monitor)
if current is not None and self.save_best:
if self.monitor_op(current, self.best):
self.best = current
if self.verbose > 0:
logging.info(
f'Epoch {epoch:05d}@{self.task.global_step}: {self.monitor} reached'
f' {current:0.5f} (best {self.best:0.5f}), saving model to'
f' {best_filepath} as top 1')
self._save_model(best_filepath)
np.save(f'{self.filepath}/best_valid.npy', [self.best])
class BaseTrainer:
def __init__(
self,
logger=True,
checkpoint_callback=True,
default_save_path=None,
gradient_clip_val=0,
process_position=0,
gpus=-1,
log_gpu_memory=None,
show_progress_bar=True,
track_grad_norm=-1,
check_val_every_n_epoch=1,
accumulate_grad_batches=1,
max_updates=1000,
min_epochs=1,
val_check_interval=1.0,
log_save_interval=100,
row_log_interval=10,
print_nan_grads=False,
weights_summary='full',
num_sanity_val_steps=5,
resume_from_checkpoint=None,
):
self.log_gpu_memory = log_gpu_memory
self.gradient_clip_val = gradient_clip_val
self.check_val_every_n_epoch = check_val_every_n_epoch
self.track_grad_norm = track_grad_norm
self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
self.process_position = process_position
self.weights_summary = weights_summary
self.max_updates = max_updates
self.min_epochs = min_epochs
self.num_sanity_val_steps = num_sanity_val_steps
self.print_nan_grads = print_nan_grads
self.resume_from_checkpoint = resume_from_checkpoint
self.default_save_path = default_save_path
        # training bookkeeping
self.total_batch_idx = 0
self.running_loss = []
self.avg_loss = 0
self.batch_idx = 0
self.tqdm_metrics = {}
self.callback_metrics = {}
self.num_val_batches = 0
self.num_training_batches = 0
self.num_test_batches = 0
self.get_train_dataloader = None
self.get_test_dataloaders = None
self.get_val_dataloaders = None
self.is_iterable_train_dataloader = False
# training state
self.model = None
self.testing = False
self.disable_validation = False
self.lr_schedulers = []
self.optimizers = None
self.global_step = 0
self.current_epoch = 0
self.total_batches = 0
# configure checkpoint callback
self.checkpoint_callback = checkpoint_callback
self.checkpoint_callback.save_function = self.save_checkpoint
self.weights_save_path = self.checkpoint_callback.filepath
# accumulated grads
self.configure_accumulated_gradients(accumulate_grad_batches)
# allow int, string and gpu list
self.data_parallel_device_ids = [
int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != '']
if len(self.data_parallel_device_ids) == 0:
self.root_gpu = None
self.on_gpu = False
else:
self.root_gpu = self.data_parallel_device_ids[0]
self.on_gpu = True
# distributed backend choice
self.use_ddp = False
self.use_dp = False
self.single_gpu = False
self.distributed_backend = 'ddp' if self.num_gpus > 0 else 'dp'
self.set_distributed_mode(self.distributed_backend)
self.proc_rank = 0
self.world_size = 1
self.node_rank = 0
# can't init progress bar here because starting a new process
# means the progress_bar won't survive pickling
self.show_progress_bar = show_progress_bar
# logging
self.log_save_interval = log_save_interval
self.val_check_interval = val_check_interval
self.logger = logger
self.logger.rank = 0
self.row_log_interval = row_log_interval
@property
def num_gpus(self):
gpus = self.data_parallel_device_ids
if gpus is None:
return 0
else:
return len(gpus)
@property
def data_parallel(self):
return self.use_dp or self.use_ddp
def get_model(self):
is_dp_module = isinstance(self.model, (DDP, DP))
model = self.model.module if is_dp_module else self.model
return model
# -----------------------------
# MODEL TRAINING
# -----------------------------
def fit(self, model):
if self.use_ddp:
mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
else:
model.model = model.build_model()
if not self.testing:
self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
if self.use_dp:
model.cuda(self.root_gpu)
model = DP(model, device_ids=self.data_parallel_device_ids)
elif self.single_gpu:
model.cuda(self.root_gpu)
self.run_pretrain_routine(model)
return 1
def init_optimizers(self, optimizers):
# single optimizer
if isinstance(optimizers, Optimizer):
return [optimizers], []
# two lists
elif len(optimizers) == 2 and isinstance(optimizers[0], list):
optimizers, lr_schedulers = optimizers
return optimizers, lr_schedulers
# single list or tuple
elif isinstance(optimizers, list) or isinstance(optimizers, tuple):
return optimizers, []
def run_pretrain_routine(self, model):
"""Sanity check a few things before starting actual training.
:param model:
"""
ref_model = model
if self.data_parallel:
ref_model = model.module
# give model convenience properties
ref_model.trainer = self
# set local properties on the model
self.copy_trainer_model_properties(ref_model)
# link up experiment object
if self.logger is not None:
ref_model.logger = self.logger
self.logger.save()
if self.use_ddp:
dist.barrier()
# set up checkpoint callback
# self.configure_checkpoint_callback()
# transfer data loaders from model
self.get_dataloaders(ref_model)
# track model now.
# if cluster resets state, the model will update with the saved weights
self.model = model
# restore training and model before hpc call
self.restore_weights(model)
# when testing requested only run test and return
if self.testing:
self.run_evaluation(test=True)
return
# check if we should run validation during training
self.disable_validation = self.num_val_batches == 0
# run tiny validation (if validation defined)
# to make sure program won't crash during val
ref_model.on_sanity_check_start()
ref_model.on_train_start()
if not self.disable_validation and self.num_sanity_val_steps > 0:
# init progress bars for validation sanity check
pbar = tqdm.tqdm(desc='Validation sanity check',
total=self.num_sanity_val_steps * len(self.get_val_dataloaders()),
leave=False, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
self.main_progress_bar = pbar
# dummy validation progress bar
self.val_progress_bar = tqdm.tqdm(disable=True)
self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)
# close progress bars
self.main_progress_bar.close()
self.val_progress_bar.close()
# init progress bar
pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
file=sys.stdout)
self.main_progress_bar = pbar
# clear cache before training
if self.on_gpu:
torch.cuda.empty_cache()
# CORE TRAINING LOOP
self.train()
def test(self, model):
self.testing = True
self.fit(model)
@property
def training_tqdm_dict(self):
tqdm_dict = {
'step': '{}'.format(self.global_step),
}
tqdm_dict.update(self.tqdm_metrics)
return tqdm_dict
# --------------------
# restore ckpt
# --------------------
def restore_weights(self, model):
"""
To restore weights we have two cases.
First, attempt to restore hpc weights. If successful, don't restore
other weights.
Otherwise, try to restore actual weights
:param model:
:return:
"""
# clear cache before restore
if self.on_gpu:
torch.cuda.empty_cache()
if self.resume_from_checkpoint is not None:
self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu)
else:
# restore weights if same exp version
self.restore_state_if_checkpoint_exists(model)
# wait for all models to restore weights
if self.use_ddp:
# wait for all processes to catch up
dist.barrier()
# clear cache after restore
if self.on_gpu:
torch.cuda.empty_cache()
def restore_state_if_checkpoint_exists(self, model):
did_restore = False
        # do nothing if there's no checkpoint dir or callback
no_ckpt_callback = (self.checkpoint_callback is None) or (not self.checkpoint_callback)
if no_ckpt_callback or not os.path.exists(self.checkpoint_callback.filepath):
return did_restore
# restore trainer state and model if there is a weight for this experiment
last_steps = -1
last_ckpt_name = None
# find last epoch
checkpoints = os.listdir(self.checkpoint_callback.filepath)
for name in checkpoints:
if '.ckpt' in name and not name.endswith('part'):
if 'steps_' in name:
steps = name.split('steps_')[1]
steps = int(re.sub('[^0-9]', '', steps))
if steps > last_steps:
last_steps = steps
last_ckpt_name = name
# restore last checkpoint
if last_ckpt_name is not None:
last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name)
self.restore(last_ckpt_path, self.on_gpu)
logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}')
did_restore = True
return did_restore
def restore(self, checkpoint_path, on_gpu):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
# load model state
model = self.get_model()
# load the state_dict on the model automatically
model.load_state_dict(checkpoint['state_dict'], strict=False)
if on_gpu:
model.cuda(self.root_gpu)
# load training state (affects trainer only)
self.restore_training_state(checkpoint)
model.global_step = self.global_step
del checkpoint
try:
if dist.is_initialized() and dist.get_rank() > 0:
return
except Exception as e:
print(e)
return
def restore_training_state(self, checkpoint):
"""
Restore trainer state.
        Model will get its chance to update
:param checkpoint:
:return:
"""
if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
self.checkpoint_callback.best = checkpoint['checkpoint_callback_best']
self.global_step = checkpoint['global_step']
self.current_epoch = checkpoint['epoch']
if self.testing:
return
# restore the optimizers
optimizer_states = checkpoint['optimizer_states']
for optimizer, opt_state in zip(self.optimizers, optimizer_states):
if optimizer is None:
return
optimizer.load_state_dict(opt_state)
# move optimizer to GPU 1 weight at a time
# avoids OOM
if self.root_gpu is not None:
for state in optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda(self.root_gpu)
# restore the lr schedulers
lr_schedulers = checkpoint['lr_schedulers']
for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):
scheduler.load_state_dict(lrs_state)
# --------------------
# MODEL SAVE CHECKPOINT
# --------------------
def _atomic_save(self, checkpoint, filepath):
"""Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.
This will create a temporary checkpoint with a suffix of ``.part``, then copy it to the final location once
saving is finished.
Args:
checkpoint (object): The object to save.
Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save``
accepts.
filepath (str|pathlib.Path): The path to which the checkpoint will be saved.
This points to the file that the checkpoint will be stored in.
"""
tmp_path = str(filepath) + ".part"
torch.save(checkpoint, tmp_path)
os.replace(tmp_path, filepath)
def save_checkpoint(self, filepath):
checkpoint = self.dump_checkpoint()
self._atomic_save(checkpoint, filepath)
def dump_checkpoint(self):
checkpoint = {
'epoch': self.current_epoch,
'global_step': self.global_step
}
if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
checkpoint['checkpoint_callback_best'] = self.checkpoint_callback.best
# save optimizers
optimizer_states = []
for i, optimizer in enumerate(self.optimizers):
if optimizer is not None:
optimizer_states.append(optimizer.state_dict())
checkpoint['optimizer_states'] = optimizer_states
# save lr schedulers
lr_schedulers = []
for i, scheduler in enumerate(self.lr_schedulers):
lr_schedulers.append(scheduler.state_dict())
checkpoint['lr_schedulers'] = lr_schedulers
# add the hparams and state_dict from the model
model = self.get_model()
checkpoint['state_dict'] = model.state_dict()
# give the model a chance to add a few things
model.on_save_checkpoint(checkpoint)
return checkpoint
def copy_trainer_model_properties(self, model):
if isinstance(model, DP):
ref_model = model.module
elif isinstance(model, DDP):
ref_model = model.module
else:
ref_model = model
for m in [model, ref_model]:
m.trainer = self
m.on_gpu = self.on_gpu
m.use_dp = self.use_dp
m.use_ddp = self.use_ddp
m.testing = self.testing
m.single_gpu = self.single_gpu
def transfer_batch_to_gpu(self, batch, gpu_id):
# base case: object can be directly moved using `cuda` or `to`
if callable(getattr(batch, 'cuda', None)):
return batch.cuda(gpu_id, non_blocking=True)
elif callable(getattr(batch, 'to', None)):
return batch.to(torch.device('cuda', gpu_id), non_blocking=True)
# when list
elif isinstance(batch, list):
for i, x in enumerate(batch):
batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
return batch
# when tuple
elif isinstance(batch, tuple):
batch = list(batch)
for i, x in enumerate(batch):
batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
return tuple(batch)
# when dict
elif isinstance(batch, dict):
for k, v in batch.items():
batch[k] = self.transfer_batch_to_gpu(v, gpu_id)
return batch
# nothing matches, return the value as is without transform
return batch
def set_distributed_mode(self, distributed_backend):
# skip for CPU
if self.num_gpus == 0:
return
# single GPU case
# in single gpu case we allow ddp so we can train on multiple
# nodes, 1 gpu per node
elif self.num_gpus == 1:
self.single_gpu = True
self.use_dp = False
self.use_ddp = False
self.root_gpu = 0
self.data_parallel_device_ids = [0]
else:
if distributed_backend is not None:
self.use_dp = distributed_backend == 'dp'
self.use_ddp = distributed_backend == 'ddp'
elif distributed_backend is None:
self.use_dp = True
self.use_ddp = False
logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}')
def ddp_train(self, gpu_idx, model):
"""
        Entry point for a DDP training process (one spawned per GPU)
:param gpu_idx:
:param model:
:param cluster_obj:
:return:
"""
# otherwise default to node rank 0
self.node_rank = 0
# show progressbar only on progress_rank 0
self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0
# determine which process we are and world size
if self.use_ddp:
self.proc_rank = self.node_rank * self.num_gpus + gpu_idx
self.world_size = self.num_gpus
# let the exp know the rank to avoid overwriting logs
if self.logger is not None:
self.logger.rank = self.proc_rank
# set up server using proc 0's ip address
# try to init for 20 times at max in case ports are taken
# where to store ip_table
model.trainer = self
model.init_ddp_connection(self.proc_rank, self.world_size)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
model.model = model.build_model()
if not self.testing:
self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
# MODEL
# copy model to each gpu
if self.distributed_backend == 'ddp':
torch.cuda.set_device(gpu_idx)
model.cuda(gpu_idx)
# set model properties before going into wrapper
self.copy_trainer_model_properties(model)
# override root GPU
self.root_gpu = gpu_idx
if self.distributed_backend == 'ddp':
device_ids = [gpu_idx]
else:
device_ids = None
# allow user to configure ddp
model = model.configure_ddp(model, device_ids)
# continue training routine
self.run_pretrain_routine(model)
def resolve_root_node_address(self, root_node):
if '[' in root_node:
name = root_node.split('[')[0]
number = root_node.split(',')[0]
if '-' in number:
number = number.split('-')[0]
number = re.sub('[^0-9]', '', number)
root_node = name + number
return root_node
def log_metrics(self, metrics, grad_norm_dic, step=None):
"""Logs the metric dict passed in.
:param metrics:
:param grad_norm_dic:
"""
# added metrics by Lightning for convenience
metrics['epoch'] = self.current_epoch
# add norms
metrics.update(grad_norm_dic)
# turn all tensors to scalars
scalar_metrics = self.metrics_to_scalars(metrics)
step = step if step is not None else self.global_step
# log actual metrics
if self.proc_rank == 0 and self.logger is not None:
self.logger.log_metrics(scalar_metrics, step=step)
self.logger.save()
def add_tqdm_metrics(self, metrics):
for k, v in metrics.items():
if type(v) is torch.Tensor:
v = v.item()
self.tqdm_metrics[k] = v
def metrics_to_scalars(self, metrics):
new_metrics = {}
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
if type(v) is dict:
v = self.metrics_to_scalars(v)
new_metrics[k] = v
return new_metrics
def process_output(self, output, train=False):
"""Reduces output according to the training mode.
Separates loss from logging and tqdm metrics
:param output:
:return:
"""
# ---------------
# EXTRACT CALLBACK KEYS
# ---------------
# all keys not progress_bar or log are candidates for callbacks
callback_metrics = {}
for k, v in output.items():
if k not in ['progress_bar', 'log', 'hiddens']:
callback_metrics[k] = v
if train and self.use_dp:
num_gpus = self.num_gpus
callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)
for k, v in callback_metrics.items():
if isinstance(v, torch.Tensor):
callback_metrics[k] = v.item()
# ---------------
# EXTRACT PROGRESS BAR KEYS
# ---------------
try:
progress_output = output['progress_bar']
# reduce progress metrics for tqdm when using dp
if train and self.use_dp:
num_gpus = self.num_gpus
progress_output = self.reduce_distributed_output(progress_output, num_gpus)
progress_bar_metrics = progress_output
except Exception:
progress_bar_metrics = {}
# ---------------
# EXTRACT LOGGING KEYS
# ---------------
# extract metrics to log to experiment
try:
log_output = output['log']
# reduce progress metrics for tqdm when using dp
if train and self.use_dp:
num_gpus = self.num_gpus
log_output = self.reduce_distributed_output(log_output, num_gpus)
log_metrics = log_output
except Exception:
log_metrics = {}
# ---------------
# EXTRACT LOSS
# ---------------
# if output dict doesn't have the keyword loss
# then assume the output=loss if scalar
loss = None
if train:
try:
loss = output['loss']
except Exception:
if type(output) is torch.Tensor:
loss = output
else:
raise RuntimeError(
'No `loss` value in the dictionary returned from `model.training_step()`.'
)
# when using dp need to reduce the loss
if self.use_dp:
loss = self.reduce_distributed_output(loss, self.num_gpus)
# ---------------
# EXTRACT HIDDEN
# ---------------
hiddens = output.get('hiddens')
# use every metric passed in as a candidate for callback
callback_metrics.update(progress_bar_metrics)
callback_metrics.update(log_metrics)
# convert tensors to numpy
for k, v in callback_metrics.items():
if isinstance(v, torch.Tensor):
callback_metrics[k] = v.item()
return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens
def reduce_distributed_output(self, output, num_gpus):
if num_gpus <= 1:
return output
# when using DP, we get one output per gpu
# average outputs and return
if type(output) is torch.Tensor:
return output.mean()
for k, v in output.items():
            # recurse on nested dicts
if isinstance(output[k], dict):
output[k] = self.reduce_distributed_output(output[k], num_gpus)
# do nothing when there's a scalar
elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
pass
# reduce only metrics that have the same number of gpus
elif output[k].size(0) == num_gpus:
reduced = torch.mean(output[k])
output[k] = reduced
return output
def clip_gradients(self):
if self.gradient_clip_val > 0:
model = self.get_model()
torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)
def print_nan_gradients(self):
model = self.get_model()
for param in model.parameters():
if (param.grad is not None) and torch.isnan(param.grad.float()).any():
logging.info(param, param.grad)
def configure_accumulated_gradients(self, accumulate_grad_batches):
self.accumulate_grad_batches = None
if isinstance(accumulate_grad_batches, dict):
self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)
elif isinstance(accumulate_grad_batches, int):
schedule = {1: accumulate_grad_batches}
self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
else:
raise TypeError("Gradient accumulation supports only int and dict types")
def get_dataloaders(self, model):
if not self.testing:
self.init_train_dataloader(model)
self.init_val_dataloader(model)
else:
self.init_test_dataloader(model)
if self.use_ddp:
dist.barrier()
if not self.testing:
self.get_train_dataloader()
self.get_val_dataloaders()
else:
self.get_test_dataloaders()
def init_train_dataloader(self, model):
        self.first_epoch = True
self.get_train_dataloader = model.train_dataloader
if isinstance(self.get_train_dataloader(), torch.utils.data.DataLoader):
self.num_training_batches = len(self.get_train_dataloader())
self.num_training_batches = int(self.num_training_batches)
else:
self.num_training_batches = float('inf')
self.is_iterable_train_dataloader = True
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
else:
self._percent_range_check('val_check_interval')
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
def init_val_dataloader(self, model):
self.get_val_dataloaders = model.val_dataloader
self.num_val_batches = 0
if self.get_val_dataloaders() is not None:
if isinstance(self.get_val_dataloaders()[0], torch.utils.data.DataLoader):
self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
self.num_val_batches = int(self.num_val_batches)
else:
self.num_val_batches = float('inf')
def init_test_dataloader(self, model):
self.get_test_dataloaders = model.test_dataloader
if self.get_test_dataloaders() is not None:
if isinstance(self.get_test_dataloaders()[0], torch.utils.data.DataLoader):
self.num_test_batches = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
self.num_test_batches = int(self.num_test_batches)
else:
self.num_test_batches = float('inf')
def evaluate(self, model, dataloaders, max_batches, test=False):
"""Run evaluation code.
:param model: PT model
:param dataloaders: list of PT dataloaders
:param max_batches: Scalar
:param test: boolean
:return:
"""
# enable eval mode
model.zero_grad()
model.eval()
# copy properties for forward overrides
self.copy_trainer_model_properties(model)
# disable gradients to save memory
torch.set_grad_enabled(False)
if test:
self.get_model().test_start()
# bookkeeping
outputs = []
# run training
for dataloader_idx, dataloader in enumerate(dataloaders):
dl_outputs = []
for batch_idx, batch in enumerate(dataloader):
if batch is None: # pragma: no cover
continue
# stop short when on fast_dev_run (sets max_batch=1)
if batch_idx >= max_batches:
break
# -----------------
# RUN EVALUATION STEP
# -----------------
output = self.evaluation_forward(model,
batch,
batch_idx,
dataloader_idx,
test)
# track outputs for collation
dl_outputs.append(output)
# batch done
if test:
self.test_progress_bar.update(1)
else:
self.val_progress_bar.update(1)
outputs.append(dl_outputs)
# with a single dataloader don't pass an array
if len(dataloaders) == 1:
outputs = outputs[0]
# give model a chance to do something with the outputs (and method defined)
model = self.get_model()
if test:
eval_results_ = model.test_end(outputs)
else:
eval_results_ = model.validation_end(outputs)
eval_results = eval_results_
# enable train mode again
model.train()
        # re-enable gradients after evaluation
torch.set_grad_enabled(True)
return eval_results
def run_evaluation(self, test=False):
# when testing make sure user defined a test step
model = self.get_model()
model.on_pre_performance_check()
# select dataloaders
if test:
dataloaders = self.get_test_dataloaders()
max_batches = self.num_test_batches
else:
# val
dataloaders = self.get_val_dataloaders()
max_batches = self.num_val_batches
# init validation or test progress bar
# main progress bar will already be closed when testing so initial position is free
position = 2 * self.process_position + (not test)
desc = 'Testing' if test else 'Validating'
pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
disable=not self.show_progress_bar, dynamic_ncols=True,
unit='batch', file=sys.stdout)
setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
# run evaluation
eval_results = self.evaluate(self.model,
dataloaders,
max_batches,
test)
if eval_results is not None:
_, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
eval_results)
# add metrics to prog bar
self.add_tqdm_metrics(prog_bar_metrics)
# log metrics
self.log_metrics(log_metrics, {})
# track metrics for callbacks
self.callback_metrics.update(callback_metrics)
# hook
model.on_post_performance_check()
# add model specific metrics
tqdm_metrics = self.training_tqdm_dict
if not test:
self.main_progress_bar.set_postfix(**tqdm_metrics)
# close progress bar
if test:
self.test_progress_bar.close()
else:
self.val_progress_bar.close()
# model checkpointing
if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
logs=self.callback_metrics)
def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
# make dataloader_idx arg in validation_step optional
args = [batch, batch_idx]
if test and len(self.get_test_dataloaders()) > 1:
args.append(dataloader_idx)
elif not test and len(self.get_val_dataloaders()) > 1:
args.append(dataloader_idx)
# handle DP, DDP forward
if self.use_ddp or self.use_dp:
output = model(*args)
return output
# single GPU
if self.single_gpu:
# for single GPU put inputs on gpu manually
root_gpu = 0
if isinstance(self.data_parallel_device_ids, list):
root_gpu = self.data_parallel_device_ids[0]
batch = self.transfer_batch_to_gpu(batch, root_gpu)
args[0] = batch
# CPU
if test:
output = model.test_step(*args)
else:
output = model.validation_step(*args)
return output
def train(self):
model = self.get_model()
# run all epochs
for epoch in range(self.current_epoch, 1000000):
# set seed for distributed sampler (enables shuffling for each epoch)
if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
self.get_train_dataloader().sampler.set_epoch(epoch)
# get model
model = self.get_model()
# update training progress in trainer and model
model.current_epoch = epoch
self.current_epoch = epoch
total_val_batches = 0
if not self.disable_validation:
# val can be checked multiple times in epoch
is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
val_checks_per_epoch = self.num_training_batches // self.val_check_batch
val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
total_val_batches = self.num_val_batches * val_checks_per_epoch
# total batches includes multiple val checks
self.total_batches = self.num_training_batches + total_val_batches
self.batch_loss_value = 0 # accumulated grads
if self.is_iterable_train_dataloader:
# for iterable train loader, the progress bar never ends
num_iterations = None
else:
num_iterations = self.total_batches
# reset progress bar
# .reset() doesn't work on disabled progress bar so we should check
desc = f'Epoch {epoch + 1}' if not self.is_iterable_train_dataloader else ''
self.main_progress_bar.set_description(desc)
            # adjust the gradient accumulation factor according to the accumulation scheduler
self.accumulation_scheduler.on_epoch_begin(epoch, self)
# -----------------
# RUN TNG EPOCH
# -----------------
self.run_training_epoch()
# update LR schedulers
if self.lr_schedulers is not None:
for lr_scheduler in self.lr_schedulers:
lr_scheduler.step(epoch=self.current_epoch)
self.main_progress_bar.close()
model.on_train_end()
if self.logger is not None:
self.logger.finalize("success")
def run_training_epoch(self):
# before epoch hook
if self.is_function_implemented('on_epoch_start'):
model = self.get_model()
model.on_epoch_start()
# run epoch
for batch_idx, batch in enumerate(self.get_train_dataloader()):
# stop epoch if we limited the number of training batches
if batch_idx >= self.num_training_batches:
break
self.batch_idx = batch_idx
model = self.get_model()
model.global_step = self.global_step
# ---------------
# RUN TRAIN STEP
# ---------------
output = self.run_training_batch(batch, batch_idx)
batch_result, grad_norm_dic, batch_step_metrics = output
# when returning -1 from train_step, we end epoch early
early_stop_epoch = batch_result == -1
# ---------------
# RUN VAL STEP
# ---------------
            should_check_val = (
                not self.disable_validation and self.global_step % self.val_check_batch == 0 and not self.first_epoch)
            self.first_epoch = False
if should_check_val:
self.run_evaluation(test=self.testing)
# when logs should be saved
should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
if should_save_log:
if self.proc_rank == 0 and self.logger is not None:
self.logger.save()
# when metrics should be logged
should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch
if should_log_metrics:
# logs user requested information to logger
self.log_metrics(batch_step_metrics, grad_norm_dic)
self.global_step += 1
self.total_batch_idx += 1
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if early_stop_epoch:
break
if self.global_step > self.max_updates:
print("| Training end..")
exit()
# epoch end hook
if self.is_function_implemented('on_epoch_end'):
model = self.get_model()
model.on_epoch_end()
def run_training_batch(self, batch, batch_idx):
# track grad norms
grad_norm_dic = {}
# track all metrics for callbacks
all_callback_metrics = []
# track metrics to log
all_log_metrics = []
if batch is None:
return 0, grad_norm_dic, {}
# hook
if self.is_function_implemented('on_batch_start'):
model_ref = self.get_model()
response = model_ref.on_batch_start(batch)
if response == -1:
return -1, grad_norm_dic, {}
splits = [batch]
self.hiddens = None
for split_idx, split_batch in enumerate(splits):
self.split_idx = split_idx
# call training_step once per optimizer
for opt_idx, optimizer in enumerate(self.optimizers):
if optimizer is None:
continue
                # make sure only the gradients of the current optimizer's parameters are calculated
                # in the training step to prevent dangling gradients in a multiple-optimizer setup.
if len(self.optimizers) > 1:
for param in self.get_model().parameters():
param.requires_grad = False
for group in optimizer.param_groups:
for param in group['params']:
param.requires_grad = True
# wrap the forward step in a closure so second order methods work
def optimizer_closure():
# forward pass
output = self.training_forward(
split_batch, batch_idx, opt_idx, self.hiddens)
closure_loss = output[0]
progress_bar_metrics = output[1]
log_metrics = output[2]
callback_metrics = output[3]
self.hiddens = output[4]
if closure_loss is None:
return None
# accumulate loss
# (if accumulate_grad_batches = 1 no effect)
closure_loss = closure_loss / self.accumulate_grad_batches
# backward pass
model_ref = self.get_model()
if closure_loss.requires_grad:
model_ref.backward(closure_loss, optimizer)
# track metrics for callbacks
all_callback_metrics.append(callback_metrics)
# track progress bar metrics
self.add_tqdm_metrics(progress_bar_metrics)
all_log_metrics.append(log_metrics)
# insert after step hook
if self.is_function_implemented('on_after_backward'):
model_ref = self.get_model()
model_ref.on_after_backward()
return closure_loss
# calculate loss
loss = optimizer_closure()
if loss is None:
continue
# nan grads
if self.print_nan_grads:
self.print_nan_gradients()
# track total loss for logging (avoid mem leaks)
self.batch_loss_value += loss.item()
# gradient update with accumulated gradients
if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
# track gradient norms when requested
if batch_idx % self.row_log_interval == 0:
if self.track_grad_norm > 0:
model = self.get_model()
grad_norm_dic = model.grad_norm(
self.track_grad_norm)
# clip gradients
self.clip_gradients()
# calls .step(), .zero_grad()
# override function to modify this behavior
model = self.get_model()
model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx)
# calculate running loss for display
self.running_loss.append(self.batch_loss_value)
self.batch_loss_value = 0
self.avg_loss = np.mean(self.running_loss[-100:])
# activate batch end hook
if self.is_function_implemented('on_batch_end'):
model = self.get_model()
model.on_batch_end()
# update progress bar
self.main_progress_bar.update(1)
self.main_progress_bar.set_postfix(**self.training_tqdm_dict)
# collapse all metrics into one dict
all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
# track all metrics for callbacks
self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()})
return 0, grad_norm_dic, all_log_metrics
def training_forward(self, batch, batch_idx, opt_idx, hiddens):
"""
Handle forward for each training case (distributed, single gpu, etc...)
:param batch:
:param batch_idx:
:return:
"""
# ---------------
# FORWARD
# ---------------
# enable not needing to add opt_idx to training_step
args = [batch, batch_idx, opt_idx]
# distributed forward
if self.use_ddp or self.use_dp:
output = self.model(*args)
# single GPU forward
elif self.single_gpu:
gpu_id = 0
if isinstance(self.data_parallel_device_ids, list):
gpu_id = self.data_parallel_device_ids[0]
batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id)
args[0] = batch
output = self.model.training_step(*args)
# CPU forward
else:
output = self.model.training_step(*args)
# allow any mode to define training_end
model_ref = self.get_model()
output_ = model_ref.training_end(output)
if output_ is not None:
output = output_
# format and reduce outputs accordingly
output = self.process_output(output, train=True)
return output
# ---------------
# Utils
# ---------------
def is_function_implemented(self, f_name):
model = self.get_model()
f_op = getattr(model, f_name, None)
return callable(f_op)
def _percent_range_check(self, name):
value = getattr(self, name)
msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}."
if name == "val_check_interval":
msg += " If you want to disable validation set `val_percent_check` to 0.0 instead."
if not 0. <= value <= 1.:
raise ValueError(msg)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/pl_utils.py |
import glob
import logging
import re
import time
from collections import defaultdict
import os
import sys
import shutil
import types
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
from torch import nn
def tensors_to_scalars(metrics):
new_metrics = {}
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
if type(v) is dict:
v = tensors_to_scalars(v)
new_metrics[k] = v
return new_metrics
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values) if max_len is None else max_len
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if shift_right:
dst[1:] = src[:-1]
dst[0] = shift_id
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
return res
def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None):
"""Convert a list of 2d tensors into a padded 3d tensor."""
size = max(v.size(0) for v in values) if max_len is None else max_len
res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if shift_right:
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
return res
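# Padding sketch (toy tensors): collate_1d/collate_2d right-pad variable-length items to the
# longest item in the batch (or to max_len when it is given).
#
#     a = torch.tensor([1, 2, 3])
#     b = torch.tensor([4, 5])
#     collate_1d([a, b], pad_idx=0)
#     # tensor([[1, 2, 3],
#     #         [4, 5, 0]])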
def _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
if len(batch) == 0:
return 0
if len(batch) == max_sentences:
return 1
if num_tokens > max_tokens:
return 1
return 0
def batch_by_size(
indices, num_tokens_fn, max_tokens=None, max_sentences=None,
required_batch_size_multiple=1, distributed=False
):
"""
Yield mini-batches of indices bucketed by size. Batches may contain
sequences of different lengths.
Args:
indices (List[int]): ordered list of dataset indices
num_tokens_fn (callable): function that returns the number of tokens at
a given index
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
"""
max_tokens = max_tokens if max_tokens is not None else sys.maxsize
max_sentences = max_sentences if max_sentences is not None else sys.maxsize
bsz_mult = required_batch_size_multiple
if isinstance(indices, types.GeneratorType):
indices = np.fromiter(indices, dtype=np.int64, count=-1)
sample_len = 0
sample_lens = []
batch = []
batches = []
for i in range(len(indices)):
idx = indices[i]
num_tokens = num_tokens_fn(idx)
sample_lens.append(num_tokens)
sample_len = max(sample_len, num_tokens)
assert sample_len <= max_tokens, (
"sentence at index {} of size {} exceeds max_tokens "
"limit of {}!".format(idx, sample_len, max_tokens)
)
num_tokens = (len(batch) + 1) * sample_len
if _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
mod_len = max(
bsz_mult * (len(batch) // bsz_mult),
len(batch) % bsz_mult,
)
batches.append(batch[:mod_len])
batch = batch[mod_len:]
sample_lens = sample_lens[mod_len:]
sample_len = max(sample_lens) if len(sample_lens) > 0 else 0
batch.append(idx)
if len(batch) > 0:
batches.append(batch)
return batches
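# Minimal usage sketch for batch_by_size with hypothetical sample lengths: a batch is cut
# once (batch size + 1) * longest-sample-length would exceed max_tokens.
def _batch_by_size_example():
    lengths = [5, 7, 3, 9, 4]                      # number of tokens per sample
    indices = list(range(len(lengths)))
    batches = batch_by_size(indices, lambda i: lengths[i], max_tokens=16)
    # here batches -> [[0, 1], [2], [3], [4]]
    return batches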
def make_positions(tensor, padding_idx):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (
torch.cumsum(mask, dim=1).type_as(mask) * mask
).long() + padding_idx
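# Minimal sketch of make_positions, assuming padding_idx == 0:
def _make_positions_example():
    tokens = torch.LongTensor([[7, 8, 9, 0, 0],
                               [5, 6, 0, 0, 0]])
    positions = make_positions(tokens, padding_idx=0)
    # positions -> tensor([[1, 2, 3, 0, 0],
    #                      [1, 2, 0, 0, 0]])
    return positions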
def softmax(x, dim):
return F.softmax(x, dim=dim, dtype=torch.float32)
def unpack_dict_to_list(samples):
samples_ = []
bsz = samples.get('outputs').size(0)
for i in range(bsz):
res = {}
for k, v in samples.items():
try:
res[k] = v[i]
except:
pass
samples_.append(res)
return samples_
def load_ckpt(cur_model, ckpt_base_dir, prefix_in_ckpt='model', force=True, strict=True):
if os.path.isfile(ckpt_base_dir):
base_dir = os.path.dirname(ckpt_base_dir)
checkpoint_path = [ckpt_base_dir]
else:
base_dir = ckpt_base_dir
        checkpoint_path = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'),
                                 key=lambda x: int(re.findall(rf'{base_dir}/model_ckpt_steps_(\d+)\.ckpt', x)[0]))
if len(checkpoint_path) > 0:
checkpoint_path = checkpoint_path[-1]
state_dict = torch.load(checkpoint_path, map_location="cpu")["state_dict"]
state_dict = {k[len(prefix_in_ckpt) + 1:]: v for k, v in state_dict.items()
if k.startswith(f'{prefix_in_ckpt}.')}
if not strict:
cur_model_state_dict = cur_model.state_dict()
unmatched_keys = []
for key, param in state_dict.items():
if key in cur_model_state_dict:
new_param = cur_model_state_dict[key]
if new_param.shape != param.shape:
unmatched_keys.append(key)
print("| Unmatched keys: ", key, new_param.shape, param.shape)
for key in unmatched_keys:
del state_dict[key]
cur_model.load_state_dict(state_dict, strict=strict)
print(f"| load '{prefix_in_ckpt}' from '{checkpoint_path}'.")
else:
e_msg = f"| ckpt not found in {base_dir}."
if force:
assert False, e_msg
else:
print(e_msg)
def remove_padding(x, padding_idx=0):
if x is None:
return None
assert len(x.shape) in [1, 2]
if len(x.shape) == 2: # [T, H]
return x[np.abs(x).sum(-1) != padding_idx]
elif len(x.shape) == 1: # [T]
return x[x != padding_idx]
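# Minimal sketch of remove_padding on a 1d sequence (default padding_idx == 0):
def _remove_padding_example():
    x = np.array([7, 8, 9, 0, 0])
    trimmed = remove_padding(x)
    # trimmed -> array([7, 8, 9]); for 2d [T, H] inputs, frames whose absolute sum is 0 are dropped.
    return trimmed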
class Timer:
timer_map = {}
def __init__(self, name, print_time=False):
if name not in Timer.timer_map:
Timer.timer_map[name] = 0
self.name = name
self.print_time = print_time
def __enter__(self):
self.t = time.time()
def __exit__(self, exc_type, exc_val, exc_tb):
Timer.timer_map[self.name] += time.time() - self.t
if self.print_time:
print(self.name, Timer.timer_map[self.name])
def print_arch(model, model_name='model'):
print(f"| {model_name} Arch: ", model)
num_params(model, model_name=model_name)
def num_params(model, print_out=True, model_name="model"):
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
if print_out:
print(f'| {model_name} Trainable Parameters: %.3fM' % parameters)
return parameters
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/__init__.py |
from collections import defaultdict
import torch
import torch.nn.functional as F
def make_positions(tensor, padding_idx):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (
torch.cumsum(mask, dim=1).type_as(mask) * mask
).long() + padding_idx
def softmax(x, dim):
return F.softmax(x, dim=dim, dtype=torch.float32)
def sequence_mask(lengths, maxlen, dtype=torch.bool):
if maxlen is None:
maxlen = lengths.max()
mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t()
    mask = mask.type(dtype)
return mask
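# Minimal sketch of sequence_mask: True marks valid (non-padded) positions.
def _sequence_mask_example():
    lengths = torch.LongTensor([3, 1])
    mask = sequence_mask(lengths, maxlen=4)
    # mask -> tensor([[ True,  True,  True, False],
    #                 [ True, False, False, False]])
    return mask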
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._instance_id, key)
def get_incremental_state(module, incremental_state, key):
"""Helper for getting incremental state for an nn.Module."""
full_key = _get_full_incremental_state_key(module, key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(module, incremental_state, key, value):
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = _get_full_incremental_state_key(module, key)
incremental_state[full_key] = value
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float('-inf')).type_as(t)
def fill_with_neg_inf2(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(-1e8).type_as(t)
def get_focus_rate(attn, src_padding_mask=None, tgt_padding_mask=None):
'''
attn: bs x L_t x L_s
'''
if src_padding_mask is not None:
attn = attn * (1 - src_padding_mask.float())[:, None, :]
if tgt_padding_mask is not None:
attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
focus_rate = attn.max(-1).values.sum(-1)
focus_rate = focus_rate / attn.sum(-1).sum(-1)
return focus_rate
def get_phone_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None):
'''
attn: bs x L_t x L_s
'''
src_mask = attn.new(attn.size(0), attn.size(-1)).bool().fill_(False)
if src_padding_mask is not None:
src_mask |= src_padding_mask
if src_seg_mask is not None:
src_mask |= src_seg_mask
attn = attn * (1 - src_mask.float())[:, None, :]
if tgt_padding_mask is not None:
attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
phone_coverage_rate = attn.max(1).values.sum(-1)
# phone_coverage_rate = phone_coverage_rate / attn.sum(-1).sum(-1)
phone_coverage_rate = phone_coverage_rate / (1 - src_mask.float()).sum(-1)
return phone_coverage_rate
def get_diagonal_focus_rate(attn, attn_ks, target_len, src_padding_mask=None, tgt_padding_mask=None,
band_mask_factor=5, band_width=50):
'''
    attn: bs x L_t x L_s
attn_ks: shape: tensor with shape [batch_size], input_lens/output_lens
diagonal: y=k*x (k=attn_ks, x:output, y:input)
1 0 0
0 1 0
0 0 1
y>=k*(x-width) and y<=k*(x+width):1
else:0
'''
# width = min(target_len/band_mask_factor, 50)
width1 = target_len / band_mask_factor
width2 = target_len.new(target_len.size()).fill_(band_width)
width = torch.where(width1 < width2, width1, width2).float()
base = torch.ones(attn.size()).to(attn.device)
zero = torch.zeros(attn.size()).to(attn.device)
x = torch.arange(0, attn.size(1)).to(attn.device)[None, :, None].float() * base
y = torch.arange(0, attn.size(2)).to(attn.device)[None, None, :].float() * base
cond = (y - attn_ks[:, None, None] * x)
cond1 = cond + attn_ks[:, None, None] * width[:, None, None]
cond2 = cond - attn_ks[:, None, None] * width[:, None, None]
mask1 = torch.where(cond1 < 0, zero, base)
mask2 = torch.where(cond2 > 0, zero, base)
mask = mask1 * mask2
if src_padding_mask is not None:
attn = attn * (1 - src_padding_mask.float())[:, None, :]
if tgt_padding_mask is not None:
attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
diagonal_attn = attn * mask
diagonal_focus_rate = diagonal_attn.sum(-1).sum(-1) / attn.sum(-1).sum(-1)
return diagonal_focus_rate, mask
def select_attn(attn_logits, type='best'):
"""
:param attn_logits: [n_layers, B, n_head, T_sp, T_txt]
:return:
"""
encdec_attn = torch.stack(attn_logits, 0).transpose(1, 2)
# [n_layers * n_head, B, T_sp, T_txt]
encdec_attn = (encdec_attn.reshape([-1, *encdec_attn.shape[2:]])).softmax(-1)
if type == 'best':
indices = encdec_attn.max(-1).values.sum(-1).argmax(0)
encdec_attn = encdec_attn.gather(
0, indices[None, :, None, None].repeat(1, 1, encdec_attn.size(-2), encdec_attn.size(-1)))[0]
return encdec_attn
elif type == 'mean':
return encdec_attn.mean(0)
def make_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
Tensor: Mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[0, 0, 0, 0 ,0],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 1],
[0, 0, 0, 1]],
[[0, 0, 1, 1],
[0, 0, 1, 1]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_pad_mask(lengths, xs, 1)
tensor([[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
>>> make_pad_mask(lengths, xs, 2)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
"""
if length_dim == 0:
raise ValueError("length_dim cannot be 0: {}".format(length_dim))
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
if xs is None:
maxlen = int(max(lengths))
else:
maxlen = xs.size(length_dim)
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
if xs is not None:
assert xs.size(0) == bs, (xs.size(0), bs)
if length_dim < 0:
length_dim = xs.dim() + length_dim
# ind = (:, None, ..., None, :, , None, ..., None)
ind = tuple(
slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
)
mask = mask[ind].expand_as(xs).to(xs.device)
return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of non-padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
        ByteTensor: mask tensor containing indices of non-padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[1, 1, 1, 1 ,1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 0],
[1, 1, 1, 0]],
[[1, 1, 0, 0],
[1, 1, 0, 0]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_non_pad_mask(lengths, xs, 1)
tensor([[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
>>> make_non_pad_mask(lengths, xs, 2)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
"""
return ~make_pad_mask(lengths, xs, length_dim)
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len).to(lengths.device)
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
def group_hidden_by_segs(h, seg_ids, max_len):
"""
:param h: [B, T, H]
:param seg_ids: [B, T]
:return: h_ph: [B, T_ph, H]
"""
B, T, H = h.shape
h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h)
all_ones = h.new_ones(h.shape[:2])
cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous()
h_gby_segs = h_gby_segs[:, 1:]
cnt_gby_segs = cnt_gby_segs[:, 1:]
h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1)
return h_gby_segs, cnt_gby_segs
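# Minimal sketch of group_hidden_by_segs with toy values: frames sharing a segment id are
# mean-pooled; segment id 0 is treated as padding and dropped.
def _group_hidden_by_segs_example():
    h = torch.arange(8, dtype=torch.float32).reshape(1, 4, 2)   # [B=1, T=4, H=2]
    seg_ids = torch.LongTensor([[1, 1, 2, 2]])                  # frames 0-1 -> seg 1, frames 2-3 -> seg 2
    h_seg, cnt = group_hidden_by_segs(h, seg_ids, max_len=2)
    # h_seg -> tensor([[[1., 2.], [5., 6.]]]), cnt -> tensor([[2., 2.]])
    return h_seg, cnt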
def mel2token_to_dur(mel2token, T_txt=None, max_dur=None):
is_torch = isinstance(mel2token, torch.Tensor)
has_batch_dim = True
if not is_torch:
mel2token = torch.LongTensor(mel2token)
if T_txt is None:
T_txt = mel2token.max()
if len(mel2token.shape) == 1:
mel2token = mel2token[None, ...]
has_batch_dim = False
B, _ = mel2token.shape
dur = mel2token.new_zeros(B, T_txt + 1).scatter_add(1, mel2token, torch.ones_like(mel2token))
dur = dur[:, 1:]
if max_dur is not None:
dur = dur.clamp(max=max_dur)
if not is_torch:
dur = dur.numpy()
if not has_batch_dim:
dur = dur[0]
return dur
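# Minimal sketch of mel2token_to_dur: counts how many mel frames align to each token.
def _mel2token_to_dur_example():
    mel2token = [1, 1, 1, 2, 3, 3]   # frame-to-token alignment (1-based token ids)
    dur = mel2token_to_dur(mel2token, T_txt=3)
    # dur -> array([3, 1, 2]): token 1 spans 3 frames, token 2 one frame, token 3 two frames.
    return dur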
def expand_word2ph(word_encoding, ph2word):
word_encoding = F.pad(word_encoding,[0,0,1,0])
ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]])
out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H]
    return out
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/tts_utils.py |
# coding=utf-8
# Authors:
# 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git)
# 2019.9 Jiayu DU
#
# requirements:
# - python 3.X
# notes: python 2.X WILL fail or produce misleading results
import sys, os, argparse, codecs, string, re
# ================================================================================ #
# basic constant
# ================================================================================ #
CHINESE_DIGIS = u'零一二三四五六七八九'
BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖'
BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖'
SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万'
SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬'
LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载'
LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載'
SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万'
SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬'
ZERO_ALT = u'〇'
ONE_ALT = u'幺'
TWO_ALTS = [u'两', u'兩']
POSITIVE = [u'正', u'正']
NEGATIVE = [u'负', u'負']
POINT = [u'点', u'點']
# PLUS = [u'加', u'加']
# SIL = [u'杠', u'槓']
# Chinese numbering system types
NUMBERING_TYPES = ['low', 'mid', 'high']
CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \
'里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)'
CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)'
COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \
'砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \
'针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \
'毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \
'盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \
'纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)'
# punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git)
CHINESE_PUNC_STOP = '!?。。'
CHINESE_PUNC_NON_STOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏'
CHINESE_PUNC_LIST = CHINESE_PUNC_STOP + CHINESE_PUNC_NON_STOP
# ================================================================================ #
# basic class
# ================================================================================ #
class ChineseChar(object):
"""
    A Chinese character.
    Each character has a simplified and a traditional form,
    e.g. simplified = '负', traditional = '負'.
    It can be converted to either form.
"""
def __init__(self, simplified, traditional):
self.simplified = simplified
self.traditional = traditional
# self.__repr__ = self.__str__
def __str__(self):
return self.simplified or self.traditional or None
def __repr__(self):
return self.__str__()
class ChineseNumberUnit(ChineseChar):
"""
    A Chinese numeral/unit character.
    Besides the simplified and traditional forms, each has an extra "big" (banker's) form,
    e.g. '陆' and '陸'.
"""
def __init__(self, power, simplified, traditional, big_s, big_t):
super(ChineseNumberUnit, self).__init__(simplified, traditional)
self.power = power
self.big_s = big_s
self.big_t = big_t
def __str__(self):
return '10^{}'.format(self.power)
@classmethod
def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False):
if small_unit:
return ChineseNumberUnit(power=index + 1,
simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1])
elif numbering_type == NUMBERING_TYPES[0]:
return ChineseNumberUnit(power=index + 8,
simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
elif numbering_type == NUMBERING_TYPES[1]:
return ChineseNumberUnit(power=(index + 2) * 4,
simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
elif numbering_type == NUMBERING_TYPES[2]:
return ChineseNumberUnit(power=pow(2, index + 3),
simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
else:
raise ValueError(
'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type))
class ChineseNumberDigit(ChineseChar):
"""
    A Chinese digit character.
"""
def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None):
super(ChineseNumberDigit, self).__init__(simplified, traditional)
self.value = value
self.big_s = big_s
self.big_t = big_t
self.alt_s = alt_s
self.alt_t = alt_t
def __str__(self):
return str(self.value)
@classmethod
def create(cls, i, v):
return ChineseNumberDigit(i, v[0], v[1], v[2], v[3])
class ChineseMath(ChineseChar):
"""
    A Chinese math-symbol character (sign or decimal point).
"""
def __init__(self, simplified, traditional, symbol, expression=None):
super(ChineseMath, self).__init__(simplified, traditional)
self.symbol = symbol
self.expression = expression
self.big_s = simplified
self.big_t = traditional
CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath
class NumberSystem(object):
"""
    The Chinese number system.
"""
pass
class MathSymbol(object):
"""
    Math symbols (traditional/simplified) used by the Chinese number system, e.g.
positive = ['正', '正']
negative = ['负', '負']
point = ['点', '點']
"""
def __init__(self, positive, negative, point):
self.positive = positive
self.negative = negative
self.point = point
def __iter__(self):
for v in self.__dict__.values():
yield v
# class OtherSymbol(object):
# """
# 其他符号
# """
#
# def __init__(self, sil):
# self.sil = sil
#
# def __iter__(self):
# for v in self.__dict__.values():
# yield v
# ================================================================================ #
# basic utils
# ================================================================================ #
def create_system(numbering_type=NUMBERING_TYPES[1]):
"""
    Create the number system for the given numbering type (default: mid).
    NUMBERING_TYPES = ['low', 'mid', 'high']: Chinese numbering system types
    low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc.
    mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc.
    high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc.
    Returns the corresponding number system.
"""
# chinese number units of '亿' and larger
all_larger_units = zip(
LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL)
larger_units = [CNU.create(i, v, numbering_type, False)
for i, v in enumerate(all_larger_units)]
# chinese number units of '十, 百, 千, 万'
all_smaller_units = zip(
SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL)
smaller_units = [CNU.create(i, v, small_unit=True)
for i, v in enumerate(all_smaller_units)]
# digis
chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS,
BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL)
digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)]
digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT
digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT
digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1]
# symbols
positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x)
negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x)
point_cn = CM(POINT[0], POINT[1], '.', lambda x,
y: float(str(x) + '.' + str(y)))
# sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y)))
system = NumberSystem()
system.units = smaller_units + larger_units
system.digits = digits
system.math = MathSymbol(positive_cn, negative_cn, point_cn)
# system.symbols = OtherSymbol(sil_cn)
return system
def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]):
def get_symbol(char, system):
for u in system.units:
if char in [u.traditional, u.simplified, u.big_s, u.big_t]:
return u
for d in system.digits:
if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]:
return d
for m in system.math:
if char in [m.traditional, m.simplified]:
return m
def string2symbols(chinese_string, system):
int_string, dec_string = chinese_string, ''
for p in [system.math.point.simplified, system.math.point.traditional]:
if p in chinese_string:
int_string, dec_string = chinese_string.split(p)
break
return [get_symbol(c, system) for c in int_string], \
[get_symbol(c, system) for c in dec_string]
def correct_symbols(integer_symbols, system):
"""
        Insert implicit digits/units so the symbol sequence is well formed, e.g.
        一百八 to 一百八十
        一亿一千三百万 to 一亿 一千万 三百万
"""
if integer_symbols and isinstance(integer_symbols[0], CNU):
if integer_symbols[0].power == 1:
integer_symbols = [system.digits[1]] + integer_symbols
if len(integer_symbols) > 1:
if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU):
integer_symbols.append(
CNU(integer_symbols[-2].power - 1, None, None, None, None))
result = []
unit_count = 0
for s in integer_symbols:
if isinstance(s, CND):
result.append(s)
unit_count = 0
elif isinstance(s, CNU):
current_unit = CNU(s.power, None, None, None, None)
unit_count += 1
if unit_count == 1:
result.append(current_unit)
elif unit_count > 1:
for i in range(len(result)):
if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power:
result[-i - 1] = CNU(result[-i - 1].power +
current_unit.power, None, None, None, None)
return result
def compute_value(integer_symbols):
"""
Compute the value.
        When the current unit is larger than the previous one, everything accumulated so far is
        multiplied by the current unit, e.g. '两千万' = 2000 * 10000, not 2000 + 10000.
"""
value = [0]
last_power = 0
for s in integer_symbols:
if isinstance(s, CND):
value[-1] = s.value
elif isinstance(s, CNU):
value[-1] *= pow(10, s.power)
if s.power > last_power:
value[:-1] = list(map(lambda v: v *
pow(10, s.power), value[:-1]))
last_power = s.power
value.append(0)
return sum(value)
system = create_system(numbering_type)
int_part, dec_part = string2symbols(chinese_string, system)
int_part = correct_symbols(int_part, system)
int_str = str(compute_value(int_part))
dec_str = ''.join([str(d.value) for d in dec_part])
if dec_part:
return '{0}.{1}'.format(int_str, dec_str)
else:
return int_str
def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False,
traditional=False, alt_zero=False, alt_one=False, alt_two=True,
use_zeros=True, use_units=True):
def get_value(value_string, use_zeros=True):
striped_string = value_string.lstrip('0')
# record nothing if all zeros
if not striped_string:
return []
# record one digits
elif len(striped_string) == 1:
if use_zeros and len(value_string) != len(striped_string):
return [system.digits[0], system.digits[int(striped_string)]]
else:
return [system.digits[int(striped_string)]]
# recursively record multiple digits
else:
result_unit = next(u for u in reversed(
system.units) if u.power < len(striped_string))
result_string = value_string[:-result_unit.power]
return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:])
system = create_system(numbering_type)
int_dec = number_string.split('.')
if len(int_dec) == 1:
int_string = int_dec[0]
dec_string = ""
elif len(int_dec) == 2:
int_string = int_dec[0]
dec_string = int_dec[1]
else:
raise ValueError(
"invalid input num string with more than one dot: {}".format(number_string))
if use_units and len(int_string) > 1:
result_symbols = get_value(int_string)
else:
result_symbols = [system.digits[int(c)] for c in int_string]
dec_symbols = [system.digits[int(c)] for c in dec_string]
if dec_string:
result_symbols += [system.math.point] + dec_symbols
if alt_two:
liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t,
system.digits[2].big_s, system.digits[2].big_t)
for i, v in enumerate(result_symbols):
if isinstance(v, CND) and v.value == 2:
next_symbol = result_symbols[i +
1] if i < len(result_symbols) - 1 else None
previous_symbol = result_symbols[i - 1] if i > 0 else None
if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))):
if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)):
result_symbols[i] = liang
# if big is True, '两' will not be used and `alt_two` has no impact on output
if big:
attr_name = 'big_'
if traditional:
attr_name += 't'
else:
attr_name += 's'
else:
if traditional:
attr_name = 'traditional'
else:
attr_name = 'simplified'
result = ''.join([getattr(s, attr_name) for s in result_symbols])
# if not use_zeros:
# result = result.strip(getattr(system.digits[0], attr_name))
if alt_zero:
result = result.replace(
getattr(system.digits[0], attr_name), system.digits[0].alt_s)
if alt_one:
result = result.replace(
getattr(system.digits[1], attr_name), system.digits[1].alt_s)
for i, p in enumerate(POINT):
if result.startswith(p):
return CHINESE_DIGIS[0] + result
# ^10, 11, .., 19
if len(result) >= 2 and result[1] in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0],
SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \
result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]:
result = result[1:]
return result
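# Minimal round-trip sketch for num2chn / chn2num with the default 'mid' numbering type
# (the concrete strings below are illustrative only):
def _chinese_number_example():
    cn = num2chn('203')    # -> '两百零三' with the default alt_two=True
    back = chn2num(cn)     # -> '203'
    return cn, back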
# ================================================================================ #
# different types of rewriters
# ================================================================================ #
class Cardinal:
"""
    CARDINAL category.
"""
def __init__(self, cardinal=None, chntext=None):
self.cardinal = cardinal
self.chntext = chntext
def chntext2cardinal(self):
return chn2num(self.chntext)
def cardinal2chntext(self):
return num2chn(self.cardinal)
class Digit:
"""
    DIGIT category.
"""
def __init__(self, digit=None, chntext=None):
self.digit = digit
self.chntext = chntext
# def chntext2digit(self):
# return chn2num(self.chntext)
def digit2chntext(self):
return num2chn(self.digit, alt_two=False, use_units=False)
class TelePhone:
"""
    TELEPHONE category.
"""
def __init__(self, telephone=None, raw_chntext=None, chntext=None):
self.telephone = telephone
self.raw_chntext = raw_chntext
self.chntext = chntext
# def chntext2telephone(self):
# sil_parts = self.raw_chntext.split('<SIL>')
# self.telephone = '-'.join([
# str(chn2num(p)) for p in sil_parts
# ])
# return self.telephone
def telephone2chntext(self, fixed=False):
if fixed:
sil_parts = self.telephone.split('-')
self.raw_chntext = '<SIL>'.join([
num2chn(part, alt_two=False, use_units=False) for part in sil_parts
])
self.chntext = self.raw_chntext.replace('<SIL>', '')
else:
sp_parts = self.telephone.strip('+').split()
self.raw_chntext = '<SP>'.join([
num2chn(part, alt_two=False, use_units=False) for part in sp_parts
])
self.chntext = self.raw_chntext.replace('<SP>', '')
return self.chntext
class Fraction:
"""
    FRACTION category.
"""
def __init__(self, fraction=None, chntext=None):
self.fraction = fraction
self.chntext = chntext
def chntext2fraction(self):
denominator, numerator = self.chntext.split('分之')
return chn2num(numerator) + '/' + chn2num(denominator)
def fraction2chntext(self):
numerator, denominator = self.fraction.split('/')
return num2chn(denominator) + '分之' + num2chn(numerator)
class Date:
"""
    DATE category.
"""
def __init__(self, date=None, chntext=None):
self.date = date
self.chntext = chntext
# def chntext2date(self):
# chntext = self.chntext
# try:
# year, other = chntext.strip().split('年', maxsplit=1)
# year = Digit(chntext=year).digit2chntext() + '年'
# except ValueError:
# other = chntext
# year = ''
# if other:
# try:
# month, day = other.strip().split('月', maxsplit=1)
# month = Cardinal(chntext=month).chntext2cardinal() + '月'
# except ValueError:
# day = chntext
# month = ''
# if day:
# day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1]
# else:
# month = ''
# day = ''
# date = year + month + day
# self.date = date
# return self.date
def date2chntext(self):
date = self.date
try:
year, other = date.strip().split('年', 1)
year = Digit(digit=year).digit2chntext() + '年'
except ValueError:
other = date
year = ''
if other:
try:
month, day = other.strip().split('月', 1)
month = Cardinal(cardinal=month).cardinal2chntext() + '月'
except ValueError:
day = date
month = ''
if day:
day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1]
else:
month = ''
day = ''
chntext = year + month + day
self.chntext = chntext
return self.chntext
class Money:
"""
    MONEY category.
"""
def __init__(self, money=None, chntext=None):
self.money = money
self.chntext = chntext
# def chntext2money(self):
# return self.money
def money2chntext(self):
money = self.money
pattern = re.compile(r'(\d+(\.\d+)?)')
matchers = pattern.findall(money)
if matchers:
for matcher in matchers:
money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext())
self.chntext = money
return self.chntext
class Percentage:
"""
    PERCENTAGE category.
"""
def __init__(self, percentage=None, chntext=None):
self.percentage = percentage
self.chntext = chntext
def chntext2percentage(self):
return chn2num(self.chntext.strip().strip('百分之')) + '%'
def percentage2chntext(self):
return '百分之' + num2chn(self.percentage.strip().strip('%'))
# ================================================================================ #
# NSW Normalizer
# ================================================================================ #
class NSWNormalizer:
def __init__(self, raw_text):
self.raw_text = '^' + raw_text + '$'
self.norm_text = ''
def _particular(self):
text = self.norm_text
pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))")
matchers = pattern.findall(text)
if matchers:
# print('particular')
for matcher in matchers:
text = text.replace(matcher[0], matcher[1] + '2' + matcher[2], 1)
self.norm_text = text
return self.norm_text
def normalize(self, remove_punc=True):
text = self.raw_text
        # normalize dates
pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)")
matchers = pattern.findall(text)
if matchers:
# print('date')
for matcher in matchers:
text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1)
        # normalize money amounts
pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" + CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)")
matchers = pattern.findall(text)
if matchers:
# print('money')
for matcher in matchers:
text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1)
        # normalize landline / mobile phone numbers
        # mobile numbers
        # http://www.jihaoba.com/news/show/13680
        # China Mobile: 139、138、137、136、135、134、159、158、157、150、151、152、188、187、182、183、184、178、198
        # China Unicom: 130、131、132、156、155、186、185、176
        # China Telecom: 133、153、189、180、181、177
pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D")
matchers = pattern.findall(text)
if matchers:
# print('telephone')
for matcher in matchers:
text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1)
        # landline numbers
pattern = re.compile(r"\D((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D")
matchers = pattern.findall(text)
if matchers:
# print('fixed telephone')
for matcher in matchers:
text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1)
        # normalize fractions
pattern = re.compile(r"(\d+/\d+)")
matchers = pattern.findall(text)
if matchers:
# print('fraction')
for matcher in matchers:
text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1)
        # normalize percentages
        text = text.replace('％', '%')
pattern = re.compile(r"(\d+(\.\d+)?%)")
matchers = pattern.findall(text)
if matchers:
# print('percentage')
for matcher in matchers:
text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1)
        # normalize plain numbers followed by quantifiers
pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" + COM_QUANTIFIERS)
matchers = pattern.findall(text)
if matchers:
# print('cardinal+quantifier')
for matcher in matchers:
text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
        # normalize long digit strings (IDs, codes)
pattern = re.compile(r"(\d{4,32})")
matchers = pattern.findall(text)
if matchers:
# print('digit')
for matcher in matchers:
text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1)
        # normalize remaining plain numbers
pattern = re.compile(r"(\d+(\.\d+)?)")
matchers = pattern.findall(text)
if matchers:
# print('cardinal')
for matcher in matchers:
text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
self.norm_text = text
self._particular()
text = self.norm_text.lstrip('^').rstrip('$')
if remove_punc:
# Punctuations removal
old_chars = CHINESE_PUNC_LIST + string.punctuation # includes all CN and EN punctuations
new_chars = ' ' * len(old_chars)
del_chars = ''
text = text.translate(str.maketrans(old_chars, new_chars, del_chars))
return text
def nsw_test_case(raw_text):
print('I:' + raw_text)
print('O:' + NSWNormalizer(raw_text).normalize())
print('')
def nsw_test():
nsw_test_case('固话:0595-23865596或23880880。')
nsw_test_case('固话:0595-23865596或23880880。')
nsw_test_case('手机:+86 19859213959或15659451527。')
nsw_test_case('分数:32477/76391。')
nsw_test_case('百分数:80.03%。')
nsw_test_case('编号:31520181154418。')
nsw_test_case('纯数:2983.07克或12345.60米。')
nsw_test_case('日期:1999年2月20日或09年3月15号。')
nsw_test_case('金钱:12块5,34.5元,20.1万')
nsw_test_case('特殊:O2O或B2C。')
nsw_test_case('3456万吨')
nsw_test_case('2938个')
nsw_test_case('938')
nsw_test_case('今天吃了115个小笼包231个馒头')
nsw_test_case('有62%的概率')
if __name__ == '__main__':
# nsw_test()
p = argparse.ArgumentParser()
p.add_argument('ifile', help='input filename, assume utf-8 encoding')
p.add_argument('ofile', help='output filename')
p.add_argument('--to_upper', action='store_true', help='convert to upper case')
p.add_argument('--to_lower', action='store_true', help='convert to lower case')
p.add_argument('--has_key', action='store_true', help="input text has Kaldi's key as first field.")
p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines')
args = p.parse_args()
ifile = codecs.open(args.ifile, 'r', 'utf8')
ofile = codecs.open(args.ofile, 'w+', 'utf8')
n = 0
for l in ifile:
key = ''
text = ''
if args.has_key:
cols = l.split(maxsplit=1)
key = cols[0]
if len(cols) == 2:
text = cols[1]
else:
text = ''
else:
text = l
# cases
if args.to_upper and args.to_lower:
sys.stderr.write('text norm: to_upper OR to_lower?')
exit(1)
if args.to_upper:
text = text.upper()
if args.to_lower:
text = text.lower()
# NSW(Non-Standard-Word) normalization
text = NSWNormalizer(text).normalize()
#
if args.has_key:
ofile.write(key + '\t' + text)
else:
ofile.write(text)
n += 1
if n % args.log_interval == 0:
sys.stderr.write("text norm: {} lines done.\n".format(n))
sys.stderr.write("text norm: {} lines done in total.\n".format(n))
ifile.close()
ofile.close()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/text_norm.py |
import glob
import logging
import os
import re
import torch
def get_last_checkpoint(work_dir, steps=None):
checkpoint = None
last_ckpt_path = None
ckpt_paths = get_all_ckpts(work_dir, steps)
if len(ckpt_paths) > 0:
last_ckpt_path = ckpt_paths[0]
checkpoint = torch.load(last_ckpt_path, map_location='cpu')
logging.info(f'load module from checkpoint: {last_ckpt_path}')
return checkpoint, last_ckpt_path
def get_all_ckpts(work_dir, steps=None):
if steps is None:
ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_*.ckpt'
else:
ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_{steps}.ckpt'
return sorted(glob.glob(ckpt_path_pattern),
key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0]))
def load_ckpt(cur_model, ckpt_base_dir, model_name='model', force=True, strict=True):
if os.path.isfile(ckpt_base_dir):
base_dir = os.path.dirname(ckpt_base_dir)
ckpt_path = ckpt_base_dir
checkpoint = torch.load(ckpt_base_dir, map_location='cpu')
else:
base_dir = ckpt_base_dir
checkpoint, ckpt_path = get_last_checkpoint(ckpt_base_dir)
if checkpoint is not None:
state_dict = checkpoint["state_dict"]
if len([k for k in state_dict.keys() if '.' in k]) > 0:
state_dict = {k[len(model_name) + 1:]: v for k, v in state_dict.items()
if k.startswith(f'{model_name}.')}
else:
if '.' not in model_name:
state_dict = state_dict[model_name]
else:
base_model_name = model_name.split('.')[0]
rest_model_name = model_name[len(base_model_name) + 1:]
state_dict = {
k[len(rest_model_name) + 1:]: v for k, v in state_dict[base_model_name].items()
if k.startswith(f'{rest_model_name}.')}
if not strict:
cur_model_state_dict = cur_model.state_dict()
unmatched_keys = []
for key, param in state_dict.items():
if key in cur_model_state_dict:
new_param = cur_model_state_dict[key]
if new_param.shape != param.shape:
unmatched_keys.append(key)
print("| Unmatched keys: ", key, new_param.shape, param.shape)
for key in unmatched_keys:
del state_dict[key]
cur_model.load_state_dict(state_dict, strict=strict)
print(f"| load '{model_name}' from '{ckpt_path}'.")
else:
e_msg = f"| ckpt not found in {base_dir}."
if force:
assert False, e_msg
else:
print(e_msg)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/ckpt_utils.py |
import re
import six
from six.moves import range # pylint: disable=redefined-builtin
PAD = "<pad>"
EOS = "<EOS>"
UNK = "<UNK>"
SEG = "|"
RESERVED_TOKENS = [PAD, EOS, UNK]
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
UNK_ID = RESERVED_TOKENS.index(UNK) # Normally 2
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_ESCAPE_CHARS = set(u"\\_u;0123456789")
def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids
class TextEncoder(object):
"""Base class for converting from ints to/from human readable strings."""
def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
self._num_reserved_ids = num_reserved_ids
@property
def num_reserved_ids(self):
return self._num_reserved_ids
def encode(self, s):
"""Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
"""
return [int(w) + self._num_reserved_ids for w in s.split()]
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return " ".join(self.decode_list(ids))
def decode_list(self, ids):
"""Transform a sequence of int ids into a their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string.
"""
decoded_ids = []
for id_ in ids:
if 0 <= id_ < self._num_reserved_ids:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
decoded_ids.append(id_ - self._num_reserved_ids)
return [str(d) for d in decoded_ids]
@property
def vocab_size(self):
raise NotImplementedError()
class ByteTextEncoder(TextEncoder):
"""Encodes each byte to an id. For 8-bit strings only."""
def encode(self, s):
numres = self._num_reserved_ids
if six.PY2:
if isinstance(s, unicode):
s = s.encode("utf-8")
return [ord(c) + numres for c in s]
# Python3: explicitly convert to UTF-8
return [c + numres for c in s.encode("utf-8")]
def decode(self, ids, strip_extraneous=False):
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
numres = self._num_reserved_ids
decoded_ids = []
int2byte = six.int2byte
for id_ in ids:
if 0 <= id_ < numres:
decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
else:
decoded_ids.append(int2byte(id_ - numres))
if six.PY2:
return "".join(decoded_ids)
# Python3: join byte arrays and then decode string
return b"".join(decoded_ids).decode("utf-8", "replace")
def decode_list(self, ids):
numres = self._num_reserved_ids
decoded_ids = []
int2byte = six.int2byte
for id_ in ids:
if 0 <= id_ < numres:
decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
else:
decoded_ids.append(int2byte(id_ - numres))
# Python3: join byte arrays and then decode string
return decoded_ids
@property
def vocab_size(self):
return 2**8 + self._num_reserved_ids
class ByteTextEncoderWithEos(ByteTextEncoder):
"""Encodes each byte to an id and appends the EOS token."""
def encode(self, s):
return super(ByteTextEncoderWithEos, self).encode(s) + [EOS_ID]
class TokenTextEncoder(TextEncoder):
"""Encoder based on a user-supplied vocabulary (file or list)."""
def __init__(self,
vocab_filename,
reverse=False,
vocab_list=None,
replace_oov=None,
num_reserved_ids=NUM_RESERVED_TOKENS):
"""Initialize from a file or list, one token per line.
Handling of reserved tokens works as follows:
- When initializing from a list, we add reserved tokens to the vocab.
- When initializing from a file, we do not add reserved tokens to the vocab.
- When saving vocab files, we save reserved tokens to the file.
Args:
vocab_filename: If not None, the full filename to read vocab from. If this
is not None, then vocab_list should be None.
reverse: Boolean indicating if tokens should be reversed during encoding
and decoding.
vocab_list: If not None, a list of elements of the vocabulary. If this is
not None, then vocab_filename should be None.
replace_oov: If not None, every out-of-vocabulary token seen when
encoding will be replaced by this string (which must be in vocab).
num_reserved_ids: Number of IDs to save for reserved tokens like <EOS>.
"""
super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
self._reverse = reverse
self._replace_oov = replace_oov
if vocab_filename:
self._init_vocab_from_file(vocab_filename)
else:
assert vocab_list is not None
self._init_vocab_from_list(vocab_list)
self.pad_index = self._token_to_id[PAD]
self.eos_index = self._token_to_id[EOS]
self.unk_index = self._token_to_id[UNK]
self.seg_index = self._token_to_id[SEG] if SEG in self._token_to_id else self.eos_index
def encode(self, s):
"""Converts a space-separated string of tokens to a list of ids."""
sentence = s
tokens = sentence.strip().split()
if self._replace_oov is not None:
tokens = [t if t in self._token_to_id else self._replace_oov
for t in tokens]
ret = [self._token_to_id[tok] for tok in tokens]
return ret[::-1] if self._reverse else ret
def decode(self, ids, strip_eos=False, strip_padding=False):
if strip_padding and self.pad() in list(ids):
pad_pos = list(ids).index(self.pad())
ids = ids[:pad_pos]
if strip_eos and self.eos() in list(ids):
eos_pos = list(ids).index(self.eos())
ids = ids[:eos_pos]
return " ".join(self.decode_list(ids))
def decode_list(self, ids):
seq = reversed(ids) if self._reverse else ids
return [self._safe_id_to_token(i) for i in seq]
@property
def vocab_size(self):
return len(self._id_to_token)
def __len__(self):
return self.vocab_size
def _safe_id_to_token(self, idx):
return self._id_to_token.get(idx, "ID_%d" % idx)
def _init_vocab_from_file(self, filename):
"""Load vocab from a file.
Args:
filename: The file to load vocabulary from.
"""
with open(filename) as f:
tokens = [token.strip() for token in f.readlines()]
def token_gen():
for token in tokens:
yield token
self._init_vocab(token_gen(), add_reserved_tokens=False)
def _init_vocab_from_list(self, vocab_list):
"""Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
"""
def token_gen():
for token in vocab_list:
if token not in RESERVED_TOKENS:
yield token
self._init_vocab(token_gen())
def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token))
def pad(self):
return self.pad_index
def eos(self):
return self.eos_index
def unk(self):
return self.unk_index
def seg(self):
return self.seg_index
def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
"""
with open(filename, "w") as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + "\n")
def sil_phonemes(self):
return [p for p in self._id_to_token.values() if not p[0].isalpha()]
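# Minimal usage sketch of TokenTextEncoder built from an in-memory vocab list
# (the vocabulary below is illustrative only):
def _token_text_encoder_example():
    encoder = TokenTextEncoder(None, vocab_list=['a', 'b', '|'], replace_oov='<UNK>')
    ids = encoder.encode('a b a')      # ids are offset past the reserved <pad>/<EOS>/<UNK> tokens
    text = encoder.decode(ids)         # -> 'a b a'
    return ids, text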
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/text_encoder.py |
from utils.hparams import hparams
class RSQRTSchedule(object):
def __init__(self, optimizer):
super().__init__()
self.optimizer = optimizer
self.constant_lr = hparams['lr']
self.warmup_updates = hparams['warmup_updates']
self.hidden_size = hparams['hidden_size']
self.lr = hparams['lr']
for param_group in optimizer.param_groups:
param_group['lr'] = self.lr
self.step(0)
def step(self, num_updates):
constant_lr = self.constant_lr
warmup = min(num_updates / self.warmup_updates, 1.0)
rsqrt_decay = max(self.warmup_updates, num_updates) ** -0.5
rsqrt_hidden = self.hidden_size ** -0.5
self.lr = max(constant_lr * warmup * rsqrt_decay * rsqrt_hidden, 1e-7)
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.lr
return self.lr
def get_lr(self):
return self.optimizer.param_groups[0]['lr']
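# Minimal standalone sketch of the schedule above (the default values here are illustrative,
# not the project's configuration): linear warmup, then reciprocal-square-root decay,
# scaled by hidden_size ** -0.5 and floored at 1e-7.
def _rsqrt_lr(num_updates, base_lr=2.0, warmup_updates=8000, hidden_size=256):
    warmup = min(num_updates / warmup_updates, 1.0)
    rsqrt_decay = max(warmup_updates, num_updates) ** -0.5
    return max(base_lr * warmup * rsqrt_decay * hidden_size ** -0.5, 1e-7)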
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/training_utils.py |
#########
# world
##########
import librosa
import numpy as np
import torch
gamma = 0
mcepInput = 3 # 0 for dB, 3 for magnitude
alpha = 0.45
en_floor = 10 ** (-80 / 20)
FFT_SIZE = 2048
f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
def f0_to_coarse(f0):
is_torch = isinstance(f0, torch.Tensor)
f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)
assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
return f0_coarse
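# Minimal sketch of f0_to_coarse: Hz values are mapped to mel-scaled coarse bins in [1, 255];
# 0 Hz (unvoiced) frames end up in bin 1.
def _f0_to_coarse_example():
    f0 = torch.FloatTensor([0.0, 100.0, 440.0, 1000.0])
    coarse = f0_to_coarse(f0)   # LongTensor of bin indices, monotonically increasing with pitch
    return coarse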
def norm_f0(f0, uv, hparams):
is_torch = isinstance(f0, torch.Tensor)
if hparams['pitch_norm'] == 'standard':
f0 = (f0 - hparams['f0_mean']) / hparams['f0_std']
if hparams['pitch_norm'] == 'log':
f0 = torch.log2(f0) if is_torch else np.log2(f0)
if uv is not None and hparams['use_uv']:
f0[uv > 0] = 0
return f0
def norm_interp_f0(f0, hparams):
is_torch = isinstance(f0, torch.Tensor)
if is_torch:
device = f0.device
f0 = f0.data.cpu().numpy()
uv = f0 == 0
f0 = norm_f0(f0, uv, hparams)
if sum(uv) == len(f0):
f0[uv] = 0
elif sum(uv) > 0:
f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
uv = torch.FloatTensor(uv)
f0 = torch.FloatTensor(f0)
if is_torch:
f0 = f0.to(device)
return f0, uv
def denorm_f0(f0, uv, hparams, pitch_padding=None, min=None, max=None):
if hparams['pitch_norm'] == 'standard':
f0 = f0 * hparams['f0_std'] + hparams['f0_mean']
if hparams['pitch_norm'] == 'log':
f0 = 2 ** f0
if min is not None:
f0 = f0.clamp(min=min)
if max is not None:
f0 = f0.clamp(max=max)
if uv is not None and hparams['use_uv']:
f0[uv > 0] = 0
if pitch_padding is not None:
f0[pitch_padding] = 0
return f0
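# Minimal round-trip sketch for norm_f0 / denorm_f0 with log-scale normalization and no
# unvoiced mask (the hparams dict below is illustrative, not the project's config):
def _f0_norm_roundtrip_example():
    hp = {'pitch_norm': 'log', 'use_uv': False}
    f0 = torch.FloatTensor([110.0, 220.0, 440.0])
    f0_norm = norm_f0(f0, None, hp)          # log2 of pitch in Hz
    f0_back = denorm_f0(f0_norm, None, hp)   # 2 ** f0_norm recovers the original contour
    return f0_back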
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/pitch_utils.py |
import pickle
from copy import deepcopy
import numpy as np
class IndexedDataset:
def __init__(self, path, num_cache=1):
super().__init__()
self.path = path
self.data_file = None
self.data_offsets = np.load(f"{path}.idx", allow_pickle=True).item()['offsets']
self.data_file = open(f"{path}.data", 'rb', buffering=-1)
self.cache = []
self.num_cache = num_cache
def check_index(self, i):
if i < 0 or i >= len(self.data_offsets) - 1:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
def __getitem__(self, i):
self.check_index(i)
if self.num_cache > 0:
for c in self.cache:
if c[0] == i:
return c[1]
self.data_file.seek(self.data_offsets[i])
b = self.data_file.read(self.data_offsets[i + 1] - self.data_offsets[i])
item = pickle.loads(b)
if self.num_cache > 0:
self.cache = [(i, deepcopy(item))] + self.cache[:-1]
return item
def __len__(self):
return len(self.data_offsets) - 1
class IndexedDatasetBuilder:
def __init__(self, path):
self.path = path
self.out_file = open(f"{path}.data", 'wb')
self.byte_offsets = [0]
def add_item(self, item):
s = pickle.dumps(item)
bytes = self.out_file.write(s)
self.byte_offsets.append(self.byte_offsets[-1] + bytes)
def finalize(self):
self.out_file.close()
np.save(open(f"{self.path}.idx", 'wb'), {'offsets': self.byte_offsets})
if __name__ == "__main__":
import random
from tqdm import tqdm
ds_path = '/tmp/indexed_ds_example'
size = 100
items = [{"a": np.random.normal(size=[10000, 10]),
"b": np.random.normal(size=[10000, 10])} for i in range(size)]
builder = IndexedDatasetBuilder(ds_path)
for i in tqdm(range(size)):
builder.add_item(items[i])
builder.finalize()
ds = IndexedDataset(ds_path)
for i in tqdm(range(10000)):
idx = random.randint(0, size - 1)
assert (ds[idx]['a'] == items[idx]['a']).all()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/indexed_datasets.py |