# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""AudioGen grids."""
| audiocraft-main | audiocraft/grids/audiogen/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ..musicgen._explorers import LMExplorer
from ...environment import AudioCraftEnvironment
@LMExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=64, partition=partitions)
launcher.bind_(solver='audiogen/audiogen_base_16khz')
# replace this with the desired environmental sound dataset
launcher.bind_(dset='internal/sounds_16khz')
fsdp = {'autocast': False, 'fsdp.use': True}
medium = {'model/lm/model_scale': 'medium'}
launcher.bind_(fsdp)
launcher(medium)
| audiocraft-main | audiocraft/grids/audiogen/audiogen_base_16khz.py |
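# Usage note (not from the original file; the exact command is an assumption,
# inferred by analogy with the evaluation grid below): this training grid would
# typically be launched with Dora, e.g. `dora grid audiogen.audiogen_base_16khz`,
# which schedules the medium-scale FSDP run bound above on the configured SLURM partitions.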
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluation with objective metrics for the pretrained AudioGen models.
This grid takes its signatures from the training grid and runs the evaluation-only stage.
When running the grid for the first time, please use:
REGEN=1 dora grid audiogen.audiogen_pretrained_16khz_eval
and re-use the REGEN=1 option when the grid is changed to force regenerating it.
Note that you need the proper external metric libraries set up to use all
the objective metrics activated in this grid. Refer to the README for more information.
"""
import os
from ..musicgen._explorers import GenerationEvalExplorer
from ...environment import AudioCraftEnvironment
from ... import train
def eval(launcher, batch_size: int = 32):
opts = {
'dset': 'audio/audiocaps_16khz',
'solver/audiogen/evaluation': 'objective_eval',
'execute_only': 'evaluate',
'+dataset.evaluate.batch_size': batch_size,
'+metrics.fad.tf.batch_size': 32,
}
# binary for FAD computation: replace this path with your own path
metrics_opts = {
'metrics.fad.tf.bin': '/data/home/jadecopet/local/usr/opt/google-research'
}
opt1 = {'generate.lm.use_sampling': True, 'generate.lm.top_k': 250, 'generate.lm.top_p': 0.}
opt2 = {'transformer_lm.two_step_cfg': True}
sub = launcher.bind(opts)
sub.bind_(metrics_opts)
# base objective metrics
sub(opt1, opt2)
@GenerationEvalExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=4, partition=partitions)
if 'REGEN' not in os.environ:
folder = train.main.dora.dir / 'grids' / __name__.split('.', 2)[-1]
with launcher.job_array():
for sig in folder.iterdir():
if not sig.is_symlink():
continue
xp = train.main.get_xp_from_sig(sig.name)
launcher(xp.argv)
return
audiogen_base = launcher.bind(solver="audiogen/audiogen_base_16khz")
audiogen_base.bind_({'autocast': False, 'fsdp.use': True})
audiogen_base_medium = audiogen_base.bind({'continue_from': '//pretrained/facebook/audiogen-medium'})
audiogen_base_medium.bind_({'model/lm/model_scale': 'medium'})
eval(audiogen_base_medium, batch_size=128)
| audiocraft-main | audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
All the functions to build the relevant models and modules
from the Hydra config.
"""
import typing as tp
import audiocraft
import omegaconf
import torch
from .encodec import CompressionModel, EncodecModel
from .lm import LMModel
from ..modules.codebooks_patterns import (
CodebooksPatternProvider,
DelayedPatternProvider,
MusicLMPattern,
ParallelPatternProvider,
UnrolledPatternProvider,
VALLEPattern,
)
from ..modules.conditioners import (
BaseConditioner,
ChromaStemConditioner,
CLAPEmbeddingConditioner,
ConditionFuser,
ConditioningProvider,
LUTConditioner,
T5Conditioner,
)
from .unet import DiffusionUnet
from .. import quantization as qt
from ..utils.utils import dict_from_config
from ..modules.diffusion_schedule import MultiBandProcessor, SampleProcessor
def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer:
klass = {
'no_quant': qt.DummyQuantizer,
'rvq': qt.ResidualVectorQuantizer
}[quantizer]
kwargs = dict_from_config(getattr(cfg, quantizer))
if quantizer != 'no_quant':
kwargs['dimension'] = dimension
return klass(**kwargs)
def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig):
if encoder_name == 'seanet':
kwargs = dict_from_config(getattr(cfg, 'seanet'))
encoder_override_kwargs = kwargs.pop('encoder')
decoder_override_kwargs = kwargs.pop('decoder')
encoder_kwargs = {**kwargs, **encoder_override_kwargs}
decoder_kwargs = {**kwargs, **decoder_override_kwargs}
encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs)
decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs)
return encoder, decoder
else:
raise KeyError(f"Unexpected compression model {cfg.compression_model}")
def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel:
"""Instantiate a compression model."""
if cfg.compression_model == 'encodec':
kwargs = dict_from_config(getattr(cfg, 'encodec'))
encoder_name = kwargs.pop('autoencoder')
quantizer_name = kwargs.pop('quantizer')
encoder, decoder = get_encodec_autoencoder(encoder_name, cfg)
quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension)
frame_rate = kwargs['sample_rate'] // encoder.hop_length
renormalize = kwargs.pop('renormalize', False)
# deprecated params
kwargs.pop('renorm', None)
return EncodecModel(encoder, decoder, quantizer,
frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device)
else:
raise KeyError(f"Unexpected compression model {cfg.compression_model}")
def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:
"""Instantiate a transformer LM."""
if cfg.lm_model == 'transformer_lm':
kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))
n_q = kwargs['n_q']
q_modeling = kwargs.pop('q_modeling', None)
codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')
attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))
cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))
cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']
fuser = get_condition_fuser(cfg)
condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device)
if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically
kwargs['cross_attention'] = True
if codebooks_pattern_cfg.modeling is None:
assert q_modeling is not None, \
"LM model should either have a codebook pattern defined or transformer_lm.q_modeling"
codebooks_pattern_cfg = omegaconf.OmegaConf.create(
{'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}
)
pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)
return LMModel(
pattern_provider=pattern_provider,
condition_provider=condition_provider,
fuser=fuser,
cfg_dropout=cfg_prob,
cfg_coef=cfg_coef,
attribute_dropout=attribute_dropout,
dtype=getattr(torch, cfg.dtype),
device=cfg.device,
**kwargs
).to(cfg.device)
else:
raise KeyError(f"Unexpected LM model {cfg.lm_model}")
def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider:
"""Instantiate a conditioning model."""
device = cfg.device
duration = cfg.dataset.segment_duration
cfg = getattr(cfg, 'conditioners')
dict_cfg = {} if cfg is None else dict_from_config(cfg)
conditioners: tp.Dict[str, BaseConditioner] = {}
condition_provider_args = dict_cfg.pop('args', {})
condition_provider_args.pop('merge_text_conditions_p', None)
condition_provider_args.pop('drop_desc_p', None)
for cond, cond_cfg in dict_cfg.items():
model_type = cond_cfg['model']
model_args = cond_cfg[model_type]
if model_type == 't5':
conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args)
elif model_type == 'lut':
conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args)
elif model_type == 'chroma_stem':
conditioners[str(cond)] = ChromaStemConditioner(
output_dim=output_dim,
duration=duration,
device=device,
**model_args
)
elif model_type == 'clap':
conditioners[str(cond)] = CLAPEmbeddingConditioner(
output_dim=output_dim,
device=device,
**model_args
)
else:
raise ValueError(f"Unrecognized conditioning model: {model_type}")
conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args)
return conditioner
def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser:
"""Instantiate a condition fuser object."""
fuser_cfg = getattr(cfg, 'fuser')
fuser_methods = ['sum', 'cross', 'prepend', 'input_interpolate']
fuse2cond = {k: fuser_cfg[k] for k in fuser_methods}
kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods}
fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs)
return fuser
def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider:
"""Instantiate a codebooks pattern provider object."""
pattern_providers = {
'parallel': ParallelPatternProvider,
'delay': DelayedPatternProvider,
'unroll': UnrolledPatternProvider,
'valle': VALLEPattern,
'musiclm': MusicLMPattern,
}
name = cfg.modeling
kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {}
klass = pattern_providers[name]
return klass(n_q, **kwargs)
def get_debug_compression_model(device='cpu', sample_rate: int = 32000):
"""Instantiate a debug compression model to be used for unit tests."""
assert sample_rate in [16000, 32000], "unsupported sample rate for debug compression model"
model_ratios = {
16000: [10, 8, 8], # 25 Hz at 16kHz
32000: [10, 8, 16] # 25 Hz at 32kHz
}
ratios: tp.List[int] = model_ratios[sample_rate]
frame_rate = 25
seanet_kwargs: dict = {
'n_filters': 4,
'n_residual_layers': 1,
'dimension': 32,
'ratios': ratios,
}
print(seanet_kwargs)
encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs)
decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs)
quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4)
init_x = torch.randn(8, 32, 128)
quantizer(init_x, 1) # initialize kmeans etc.
compression_model = EncodecModel(
encoder, decoder, quantizer,
frame_rate=frame_rate, sample_rate=sample_rate, channels=1).to(device)
return compression_model.eval()
def get_diffusion_model(cfg: omegaconf.DictConfig):
# TODO Find a way to infer the channels from dset
channels = cfg.channels
num_steps = cfg.schedule.num_steps
return DiffusionUnet(
chin=channels, num_steps=num_steps, **cfg.diffusion_unet)
def get_processor(cfg, sample_rate: int = 24000):
sample_processor = SampleProcessor()
if cfg.use:
kw = dict(cfg)
kw.pop('use')
kw.pop('name')
if cfg.name == "multi_band_processor":
sample_processor = MultiBandProcessor(sample_rate=sample_rate, **kw)
return sample_processor
def get_debug_lm_model(device='cpu'):
"""Instantiate a debug LM to be used for unit tests."""
pattern = DelayedPatternProvider(n_q=4)
dim = 16
providers = {
'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"),
}
condition_provider = ConditioningProvider(providers)
fuser = ConditionFuser(
{'cross': ['description'], 'prepend': [],
'sum': [], 'input_interpolate': []})
lm = LMModel(
pattern, condition_provider, fuser,
n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2,
cross_attention=True, causal=True)
return lm.to(device).eval()
def get_wrapped_compression_model(
compression_model: CompressionModel,
cfg: omegaconf.DictConfig) -> CompressionModel:
# more to come.
return compression_model
| audiocraft-main | audiocraft/models/builders.py |
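# Usage sketch (not part of the original file): how the debug builders defined
# above fit together. Shapes and the random waveform are illustrative; this
# assumes the audiocraft package is importable.
import torch
from audiocraft.models.builders import get_debug_compression_model, get_debug_lm_model

debug_compression = get_debug_compression_model(device='cpu', sample_rate=32000)
debug_lm = get_debug_lm_model(device='cpu')
wav = torch.randn(2, 1, 32000)                    # [B, C, T], one second at 32 kHz
codes, scale = debug_compression.encode(wav)      # discrete tokens [B, n_q, T_frames]
recon = debug_compression.decode(codes, scale)    # back to waveform [B, C, T]
print(codes.shape, recon.shape)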
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Main model for using MusicGen. This will combine all the required components
and provide easy access to the generation API.
"""
import typing as tp
import warnings
import torch
from .encodec import CompressionModel
from .lm import LMModel
from .builders import get_debug_compression_model, get_debug_lm_model
from .loaders import load_compression_model, load_lm_model
from ..data.audio_utils import convert_audio
from ..modules.conditioners import ConditioningAttributes, WavCondition
from ..utils.autocast import TorchAutocast
MelodyList = tp.List[tp.Optional[torch.Tensor]]
MelodyType = tp.Union[torch.Tensor, MelodyList]
# backward compatible names mapping
_HF_MODEL_CHECKPOINTS_MAP = {
"small": "facebook/musicgen-small",
"medium": "facebook/musicgen-medium",
"large": "facebook/musicgen-large",
"melody": "facebook/musicgen-melody",
}
class MusicGen:
"""MusicGen main model with convenient generation API.
Args:
name (str): name of the model.
compression_model (CompressionModel): Compression model
used to map audio to invertible discrete representations.
lm (LMModel): Language model over discrete representations.
max_duration (float, optional): maximum duration the model can produce,
otherwise, inferred from the training params.
"""
def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,
max_duration: tp.Optional[float] = None):
self.name = name
self.compression_model = compression_model
self.lm = lm
if max_duration is None:
if hasattr(lm, 'cfg'):
max_duration = lm.cfg.dataset.segment_duration # type: ignore
else:
raise ValueError("You must provide max_duration when building directly MusicGen")
assert max_duration is not None
self.max_duration: float = max_duration
self.device = next(iter(lm.parameters())).device
self.generation_params: dict = {}
self.set_generation_params(duration=15) # 15 seconds by default
self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None
if self.device.type == 'cpu':
self.autocast = TorchAutocast(enabled=False)
else:
self.autocast = TorchAutocast(
enabled=True, device_type=self.device.type, dtype=torch.float16)
@property
def frame_rate(self) -> float:
"""Roughly the number of AR steps per seconds."""
return self.compression_model.frame_rate
@property
def sample_rate(self) -> int:
"""Sample rate of the generated audio."""
return self.compression_model.sample_rate
@property
def audio_channels(self) -> int:
"""Audio channels of the generated audio."""
return self.compression_model.channels
@staticmethod
def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):
"""Return pretrained model, we provide four models:
- facebook/musicgen-small (300M), text to music,
# see: https://huggingface.co/facebook/musicgen-small
- facebook/musicgen-medium (1.5B), text to music,
# see: https://huggingface.co/facebook/musicgen-medium
- facebook/musicgen-melody (1.5B) text to music and text+melody to music,
# see: https://huggingface.co/facebook/musicgen-melody
- facebook/musicgen-large (3.3B), text to music,
# see: https://huggingface.co/facebook/musicgen-large
"""
if device is None:
if torch.cuda.device_count():
device = 'cuda'
else:
device = 'cpu'
if name == 'debug':
# used only for unit tests
compression_model = get_debug_compression_model(device)
lm = get_debug_lm_model(device)
return MusicGen(name, compression_model, lm, max_duration=30)
if name in _HF_MODEL_CHECKPOINTS_MAP:
warnings.warn(
"MusicGen pretrained model relying on deprecated checkpoint mapping. " +
f"Please use full pre-trained id instead: facebook/musicgen-{name}")
name = _HF_MODEL_CHECKPOINTS_MAP[name]
lm = load_lm_model(name, device=device)
compression_model = load_compression_model(name, device=device)
if 'self_wav' in lm.condition_provider.conditioners:
lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True
return MusicGen(name, compression_model, lm)
def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,
top_p: float = 0.0, temperature: float = 1.0,
duration: float = 30.0, cfg_coef: float = 3.0,
two_step_cfg: bool = False, extend_stride: float = 18):
"""Set the generation parameters for MusicGen.
Args:
use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.
top_k (int, optional): top_k used for sampling. Defaults to 250.
top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.
temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.
duration (float, optional): Duration of the generated waveform. Defaults to 30.0.
cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.
two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,
instead of batching together the two. This has some impact on how things
are padded but seems to have little impact in practice.
extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much
should we extend the audio each time. Larger values will mean less context is
preserved, and shorter values will require extra computations.
"""
assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration."
self.extend_stride = extend_stride
self.duration = duration
self.generation_params = {
'use_sampling': use_sampling,
'temp': temperature,
'top_k': top_k,
'top_p': top_p,
'cfg_coef': cfg_coef,
'two_step_cfg': two_step_cfg,
}
def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):
"""Override the default progress callback."""
self._progress_callback = progress_callback
def generate_unconditional(self, num_samples: int, progress: bool = False,
return_tokens: bool = False) -> tp.Union[torch.Tensor,
tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples in an unconditional manner.
Args:
num_samples (int): Number of samples to be generated.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
descriptions: tp.List[tp.Optional[str]] = [None] * num_samples
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \
-> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples conditioned on text.
Args:
descriptions (list of str): A list of strings used as text conditioning.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
assert prompt_tokens is None
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,
melody_sample_rate: int, progress: bool = False,
return_tokens: bool = False) -> tp.Union[torch.Tensor,
tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples conditioned on text and melody.
Args:
descriptions (list of str): A list of strings used as text conditioning.
melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as
melody conditioning. Should have shape [B, C, T] with B matching the description length,
C=1 or 2. It can be [C, T] if there is a single description. It can also be
a list of [C, T] tensors.
melody_sample_rate: (int): Sample rate of the melody waveforms.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
if isinstance(melody_wavs, torch.Tensor):
if melody_wavs.dim() == 2:
melody_wavs = melody_wavs[None]
if melody_wavs.dim() != 3:
raise ValueError("Melody wavs should have a shape [B, C, T].")
melody_wavs = list(melody_wavs)
else:
for melody in melody_wavs:
if melody is not None:
assert melody.dim() == 2, "One melody in the list has the wrong number of dims."
melody_wavs = [
convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)
if wav is not None else None
for wav in melody_wavs]
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,
melody_wavs=melody_wavs)
assert prompt_tokens is None
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,
descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,
progress: bool = False, return_tokens: bool = False) \
-> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples conditioned on audio prompts.
Args:
prompt (torch.Tensor): A batch of waveforms used for continuation.
Prompt should be [B, C, T], or [C, T] if only one sample is generated.
prompt_sample_rate (int): Sampling rate of the given audio waveforms.
descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
if prompt.dim() == 2:
prompt = prompt[None]
if prompt.dim() != 3:
raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).")
prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)
if descriptions is None:
descriptions = [None] * len(prompt)
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)
assert prompt_tokens is not None
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
@torch.no_grad()
def _prepare_tokens_and_attributes(
self,
descriptions: tp.Sequence[tp.Optional[str]],
prompt: tp.Optional[torch.Tensor],
melody_wavs: tp.Optional[MelodyList] = None,
) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:
"""Prepare model inputs.
Args:
descriptions (list of str): A list of strings used as text conditioning.
prompt (torch.Tensor): A batch of waveforms used for continuation.
melody_wavs (torch.Tensor, optional): A batch of waveforms
used as melody conditioning. Defaults to None.
"""
attributes = [
ConditioningAttributes(text={'description': description})
for description in descriptions]
if melody_wavs is None:
for attr in attributes:
attr.wav['self_wav'] = WavCondition(
torch.zeros((1, 1, 1), device=self.device),
torch.tensor([0], device=self.device),
sample_rate=[self.sample_rate],
path=[None])
else:
if 'self_wav' not in self.lm.condition_provider.conditioners:
raise RuntimeError("This model doesn't support melody conditioning. "
"Use the `melody` model.")
assert len(melody_wavs) == len(descriptions), \
f"number of melody wavs must match number of descriptions! " \
f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}"
for attr, melody in zip(attributes, melody_wavs):
if melody is None:
attr.wav['self_wav'] = WavCondition(
torch.zeros((1, 1, 1), device=self.device),
torch.tensor([0], device=self.device),
sample_rate=[self.sample_rate],
path=[None])
else:
attr.wav['self_wav'] = WavCondition(
melody[None].to(device=self.device),
torch.tensor([melody.shape[-1]], device=self.device),
sample_rate=[self.sample_rate],
path=[None],
)
if prompt is not None:
if descriptions is not None:
assert len(descriptions) == len(prompt), "Prompt and nb. descriptions don't match"
prompt = prompt.to(self.device)
prompt_tokens, scale = self.compression_model.encode(prompt)
assert scale is None
else:
prompt_tokens = None
return attributes, prompt_tokens
def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],
prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:
"""Generate discrete audio tokens given audio prompt and/or conditions.
Args:
attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).
prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
Returns:
torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.
"""
total_gen_len = int(self.duration * self.frame_rate)
max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
current_gen_offset: int = 0
def _progress_callback(generated_tokens: int, tokens_to_generate: int):
generated_tokens += current_gen_offset
if self._progress_callback is not None:
# Note that total_gen_len might be quite wrong depending on the
# codebook pattern used, but with delay it is almost accurate.
self._progress_callback(generated_tokens, total_gen_len)
else:
print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r')
if prompt_tokens is not None:
assert max_prompt_len >= prompt_tokens.shape[-1], \
"Prompt is longer than audio to generate"
callback = None
if progress:
callback = _progress_callback
if self.duration <= self.max_duration:
# generate by sampling from LM, simple case.
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=total_gen_len, **self.generation_params)
else:
# now this gets a bit messier, we need to handle prompts,
# melody conditioning etc.
ref_wavs = [attr.wav['self_wav'] for attr in attributes]
all_tokens = []
if prompt_tokens is None:
prompt_length = 0
else:
all_tokens.append(prompt_tokens)
prompt_length = prompt_tokens.shape[-1]
stride_tokens = int(self.frame_rate * self.extend_stride)
while current_gen_offset + prompt_length < total_gen_len:
time_offset = current_gen_offset / self.frame_rate
chunk_duration = min(self.duration - time_offset, self.max_duration)
max_gen_len = int(chunk_duration * self.frame_rate)
for attr, ref_wav in zip(attributes, ref_wavs):
wav_length = ref_wav.length.item()
if wav_length == 0:
continue
# We will extend the wav periodically if it is not long enough.
# we have to do it here rather than in conditioners.py as otherwise
# we wouldn't have the full wav.
initial_position = int(time_offset * self.sample_rate)
wav_target_length = int(self.max_duration * self.sample_rate)
positions = torch.arange(initial_position,
initial_position + wav_target_length, device=self.device)
attr.wav['self_wav'] = WavCondition(
ref_wav[0][..., positions % wav_length],
torch.full_like(ref_wav[1], wav_target_length),
[self.sample_rate] * ref_wav[0].size(0),
[None], [0.])
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=max_gen_len, **self.generation_params)
if prompt_tokens is None:
all_tokens.append(gen_tokens)
else:
all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])
prompt_tokens = gen_tokens[:, :, stride_tokens:]
prompt_length = prompt_tokens.shape[-1]
current_gen_offset += stride_tokens
gen_tokens = torch.cat(all_tokens, dim=-1)
return gen_tokens
def generate_audio(self, gen_tokens: torch.Tensor):
"""Generate Audio from tokens"""
assert gen_tokens.dim() == 3
with torch.no_grad():
gen_audio = self.compression_model.decode(gen_tokens, None)
return gen_audio
| audiocraft-main | audiocraft/models/musicgen.py |
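# Usage sketch (not part of the original file): the MusicGen generation API
# defined above. The checkpoint name comes from the docstring; the text prompt
# and output path are illustrative, and weights are downloaded from the Hub.
import torchaudio
from audiocraft.models.musicgen import MusicGen

musicgen = MusicGen.get_pretrained('facebook/musicgen-small')
musicgen.set_generation_params(duration=8, use_sampling=True, top_k=250, cfg_coef=3.0)
wav = musicgen.generate(['lo-fi hip hop beat with soft piano'])  # [B, C, T]
torchaudio.save('sample.wav', wav[0].cpu(), musicgen.sample_rate)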
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Pytorch Unet Module used for diffusion.
"""
from dataclasses import dataclass
import typing as tp
import torch
from torch import nn
from torch.nn import functional as F
from audiocraft.modules.transformer import StreamingTransformer, create_sin_embedding
@dataclass
class Output:
sample: torch.Tensor
def get_model(cfg, channels: int, side: int, num_steps: int):
if cfg.model == 'unet':
return DiffusionUnet(
chin=channels, num_steps=num_steps, **cfg.diffusion_unet)
else:
raise RuntimeError('Not Implemented')
class ResBlock(nn.Module):
def __init__(self, channels: int, kernel: int = 3, norm_groups: int = 4,
dilation: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
dropout: float = 0.):
super().__init__()
stride = 1
padding = dilation * (kernel - stride) // 2
Conv = nn.Conv1d
Drop = nn.Dropout1d
self.norm1 = nn.GroupNorm(norm_groups, channels)
self.conv1 = Conv(channels, channels, kernel, 1, padding, dilation=dilation)
self.activation1 = activation()
self.dropout1 = Drop(dropout)
self.norm2 = nn.GroupNorm(norm_groups, channels)
self.conv2 = Conv(channels, channels, kernel, 1, padding, dilation=dilation)
self.activation2 = activation()
self.dropout2 = Drop(dropout)
def forward(self, x):
h = self.dropout1(self.conv1(self.activation1(self.norm1(x))))
h = self.dropout2(self.conv2(self.activation2(self.norm2(h))))
return x + h
class DecoderLayer(nn.Module):
def __init__(self, chin: int, chout: int, kernel: int = 4, stride: int = 2,
norm_groups: int = 4, res_blocks: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
dropout: float = 0.):
super().__init__()
padding = (kernel - stride) // 2
self.res_blocks = nn.Sequential(
*[ResBlock(chin, norm_groups=norm_groups, dilation=2**idx, dropout=dropout)
for idx in range(res_blocks)])
self.norm = nn.GroupNorm(norm_groups, chin)
ConvTr = nn.ConvTranspose1d
self.convtr = ConvTr(chin, chout, kernel, stride, padding, bias=False)
self.activation = activation()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.res_blocks(x)
x = self.norm(x)
x = self.activation(x)
x = self.convtr(x)
return x
class EncoderLayer(nn.Module):
def __init__(self, chin: int, chout: int, kernel: int = 4, stride: int = 2,
norm_groups: int = 4, res_blocks: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
dropout: float = 0.):
super().__init__()
padding = (kernel - stride) // 2
Conv = nn.Conv1d
self.conv = Conv(chin, chout, kernel, stride, padding, bias=False)
self.norm = nn.GroupNorm(norm_groups, chout)
self.activation = activation()
self.res_blocks = nn.Sequential(
*[ResBlock(chout, norm_groups=norm_groups, dilation=2**idx, dropout=dropout)
for idx in range(res_blocks)])
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, T = x.shape
stride, = self.conv.stride
pad = (stride - (T % stride)) % stride
x = F.pad(x, (0, pad))
x = self.conv(x)
x = self.norm(x)
x = self.activation(x)
x = self.res_blocks(x)
return x
class BLSTM(nn.Module):
"""BiLSTM with same hidden units as input dim.
"""
def __init__(self, dim, layers=2):
super().__init__()
self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim)
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x):
x = x.permute(2, 0, 1)
x = self.lstm(x)[0]
x = self.linear(x)
x = x.permute(1, 2, 0)
return x
class DiffusionUnet(nn.Module):
def __init__(self, chin: int = 3, hidden: int = 24, depth: int = 3, growth: float = 2.,
max_channels: int = 10_000, num_steps: int = 1000, emb_all_layers=False, cross_attention: bool = False,
bilstm: bool = False, transformer: bool = False,
codec_dim: tp.Optional[int] = None, **kwargs):
super().__init__()
self.encoders = nn.ModuleList()
self.decoders = nn.ModuleList()
self.embeddings: tp.Optional[nn.ModuleList] = None
self.embedding = nn.Embedding(num_steps, hidden)
if emb_all_layers:
self.embeddings = nn.ModuleList()
self.condition_embedding: tp.Optional[nn.Module] = None
for d in range(depth):
encoder = EncoderLayer(chin, hidden, **kwargs)
decoder = DecoderLayer(hidden, chin, **kwargs)
self.encoders.append(encoder)
self.decoders.insert(0, decoder)
if emb_all_layers and d > 0:
assert self.embeddings is not None
self.embeddings.append(nn.Embedding(num_steps, hidden))
chin = hidden
hidden = min(int(chin * growth), max_channels)
self.bilstm: tp.Optional[nn.Module]
if bilstm:
self.bilstm = BLSTM(chin)
else:
self.bilstm = None
self.use_transformer = transformer
self.cross_attention = False
if transformer:
self.cross_attention = cross_attention
self.transformer = StreamingTransformer(chin, 8, 6, bias_ff=False, bias_attn=False,
cross_attention=cross_attention)
self.use_codec = False
if codec_dim is not None:
self.conv_codec = nn.Conv1d(codec_dim, chin, 1)
self.use_codec = True
def forward(self, x: torch.Tensor, step: tp.Union[int, torch.Tensor], condition: tp.Optional[torch.Tensor] = None):
skips = []
bs = x.size(0)
z = x
view_args = [1]
if type(step) is torch.Tensor:
step_tensor = step
else:
step_tensor = torch.tensor([step], device=x.device, dtype=torch.long).expand(bs)
for idx, encoder in enumerate(self.encoders):
z = encoder(z)
if idx == 0:
z = z + self.embedding(step_tensor).view(bs, -1, *view_args).expand_as(z)
elif self.embeddings is not None:
z = z + self.embeddings[idx - 1](step_tensor).view(bs, -1, *view_args).expand_as(z)
skips.append(z)
if self.use_codec: # insert condition in the bottleneck
assert condition is not None, "Model defined for conditional generation"
condition_emb = self.conv_codec(condition) # reshape to the bottleneck dim
assert condition_emb.size(-1) <= 2 * z.size(-1), \
f"You are downsampling the conditionning with factor >=2 : {condition_emb.size(-1)=} and {z.size(-1)=}"
if not self.cross_attention:
condition_emb = torch.nn.functional.interpolate(condition_emb, z.size(-1))
assert z.size() == condition_emb.size()
z += condition_emb
cross_attention_src = None
else:
cross_attention_src = condition_emb.permute(0, 2, 1) # B, T, C
B, T, C = cross_attention_src.shape
positions = torch.arange(T, device=x.device).view(1, -1, 1)
pos_emb = create_sin_embedding(positions, C, max_period=10_000, dtype=cross_attention_src.dtype)
cross_attention_src = cross_attention_src + pos_emb
if self.use_transformer:
z = self.transformer(z.permute(0, 2, 1), cross_attention_src=cross_attention_src).permute(0, 2, 1)
else:
if self.bilstm is None:
z = torch.zeros_like(z)
else:
z = self.bilstm(z)
for decoder in self.decoders:
s = skips.pop(-1)
z = z[:, :, :s.shape[2]]
z = z + s
z = decoder(z)
z = z[:, :, :x.shape[2]]
return Output(z)
| audiocraft-main | audiocraft/models/unet.py |
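# Usage sketch (not part of the original file): a tiny forward pass through the
# DiffusionUnet defined above. The channel count, depth and sequence length are
# illustrative assumptions.
import torch
from audiocraft.models.unet import DiffusionUnet

unet = DiffusionUnet(chin=1, hidden=24, depth=3, num_steps=1000)
x = torch.randn(2, 1, 4096)   # [B, C, T] noisy audio band
out = unet(x, step=10)        # denoising at diffusion step 10
print(out.sample.shape)       # cropped back to the input length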
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility functions to load from the checkpoints.
Each checkpoint is a torch.saved dict with the following keys:
- 'xp.cfg': the hydra config as dumped during training. This should be used
to rebuild the object using the audiocraft.models.builders functions,
- 'model_best_state': a readily loadable best state for the model, including
the conditioner. The model obtained from `xp.cfg` should be compatible
with this state dict. In the case of a LM, the encodec model would not be
bundled along but instead provided separately.
Those functions also support loading from a remote location with the Torch Hub API.
They also support overriding some parameters, in particular the device and dtype
of the returned model.
"""
from pathlib import Path
from huggingface_hub import hf_hub_download
import typing as tp
import os
from omegaconf import OmegaConf, DictConfig
import torch
from . import builders
from .encodec import CompressionModel
def get_audiocraft_cache_dir() -> tp.Optional[str]:
return os.environ.get('AUDIOCRAFT_CACHE_DIR', None)
def _get_state_dict(
file_or_url_or_id: tp.Union[Path, str],
filename: tp.Optional[str] = None,
device='cpu',
cache_dir: tp.Optional[str] = None,
):
if cache_dir is None:
cache_dir = get_audiocraft_cache_dir()
# Return the state dict either from a file or url
file_or_url_or_id = str(file_or_url_or_id)
assert isinstance(file_or_url_or_id, str)
if os.path.isfile(file_or_url_or_id):
return torch.load(file_or_url_or_id, map_location=device)
if os.path.isdir(file_or_url_or_id):
file = f"{file_or_url_or_id}/{filename}"
return torch.load(file, map_location=device)
elif file_or_url_or_id.startswith('https://'):
return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True)
else:
assert filename is not None, "filename needs to be defined if using HF checkpoints"
file = hf_hub_download(repo_id=file_or_url_or_id, filename=filename, cache_dir=cache_dir)
return torch.load(file, map_location=device)
def load_compression_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
return _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir)
def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
if 'pretrained' in pkg:
return CompressionModel.get_pretrained(pkg['pretrained'], device=device)
cfg = OmegaConf.create(pkg['xp.cfg'])
cfg.device = str(device)
model = builders.get_compression_model(cfg)
model.load_state_dict(pkg['best_state'])
model.eval()
return model
def load_lm_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
return _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir)
def _delete_param(cfg: DictConfig, full_name: str):
parts = full_name.split('.')
for part in parts[:-1]:
if part in cfg:
cfg = cfg[part]
else:
return
OmegaConf.set_struct(cfg, False)
if parts[-1] in cfg:
del cfg[parts[-1]]
OmegaConf.set_struct(cfg, True)
def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
cfg = OmegaConf.create(pkg['xp.cfg'])
cfg.device = str(device)
if cfg.device == 'cpu':
cfg.dtype = 'float32'
else:
cfg.dtype = 'float16'
_delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
_delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
_delete_param(cfg, 'conditioners.args.drop_desc_p')
model = builders.get_lm_model(cfg)
model.load_state_dict(pkg['best_state'])
model.eval()
model.cfg = cfg
return model
def load_mbd_ckpt(file_or_url_or_id: tp.Union[Path, str],
filename: tp.Optional[str] = None,
cache_dir: tp.Optional[str] = None):
return _get_state_dict(file_or_url_or_id, filename=filename, cache_dir=cache_dir)
def load_diffusion_models(file_or_url_or_id: tp.Union[Path, str],
device='cpu',
filename: tp.Optional[str] = None,
cache_dir: tp.Optional[str] = None):
pkg = load_mbd_ckpt(file_or_url_or_id, filename=filename, cache_dir=cache_dir)
models = []
processors = []
cfgs = []
sample_rate = pkg['sample_rate']
for i in range(pkg['n_bands']):
cfg = pkg[i]['cfg']
model = builders.get_diffusion_model(cfg)
model_dict = pkg[i]['model_state']
model.load_state_dict(model_dict)
model.to(device)
processor = builders.get_processor(cfg=cfg.processor, sample_rate=sample_rate)
processor_dict = pkg[i]['processor_state']
processor.load_state_dict(processor_dict)
processor.to(device)
models.append(model)
processors.append(processor)
cfgs.append(cfg)
return models, processors, cfgs
| audiocraft-main | audiocraft/models/loaders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Models for EnCodec, AudioGen, MusicGen, as well as the generic LMModel.
"""
# flake8: noqa
from . import builders, loaders
from .encodec import (
CompressionModel, EncodecModel, DAC,
HFEncodecModel, HFEncodecCompressionModel)
from .audiogen import AudioGen
from .lm import LMModel
from .multibanddiffusion import MultiBandDiffusion
from .musicgen import MusicGen
from .unet import DiffusionUnet
| audiocraft-main | audiocraft/models/__init__.py |
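# Usage sketch (not part of the original file): the public entry points
# re-exported by the package __init__ above.
from audiocraft.models import AudioGen, MusicGen, MultiBandDiffusion, EncodecModel  # noqa: F401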
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Multi Band Diffusion models as described in
"From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion"
(paper link).
"""
import typing as tp
import torch
import julius
from .unet import DiffusionUnet
from ..modules.diffusion_schedule import NoiseSchedule
from .encodec import CompressionModel
from ..solvers.compression import CompressionSolver
from .loaders import load_compression_model, load_diffusion_models
class DiffusionProcess:
"""Sampling for a diffusion Model.
Args:
model (DiffusionUnet): Diffusion U-Net model.
noise_schedule (NoiseSchedule): Noise schedule for diffusion process.
"""
def __init__(self, model: DiffusionUnet, noise_schedule: NoiseSchedule) -> None:
"""
"""
self.model = model
self.schedule = noise_schedule
def generate(self, condition: torch.Tensor, initial_noise: torch.Tensor,
step_list: tp.Optional[tp.List[int]] = None):
"""Perform one diffusion process to generate one of the bands.
Args:
condition (tensor): The embeddings from the compression model.
initial_noise (tensor): The initial noise to start the process.
"""
return self.schedule.generate_subsampled(model=self.model, initial=initial_noise, step_list=step_list,
condition=condition)
class MultiBandDiffusion:
"""Sample from multiple diffusion models.
Args:
DPs (list of DiffusionProcess): Diffusion processes.
codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.
"""
def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:
self.DPs = DPs
self.codec_model = codec_model
self.device = next(self.codec_model.parameters()).device
@property
def sample_rate(self) -> int:
return self.codec_model.sample_rate
@staticmethod
def get_mbd_musicgen(device=None):
"""Load our diffusion models trained for MusicGen."""
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
path = 'facebook/multiband-diffusion'
filename = 'mbd_musicgen_32khz.th'
name = 'facebook/musicgen-small'
codec_model = load_compression_model(name, device=device)
models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)
DPs = []
for i in range(len(models)):
schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)
DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))
return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)
@staticmethod
def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,
device: tp.Optional[tp.Union[torch.device, str]] = None,
n_q: tp.Optional[int] = None):
"""Get the pretrained Models for MultibandDiffusion.
Args:
bw (float): Bandwidth of the compression model.
pretrained (bool): Whether to use / download if necessary the models.
device (torch.device or str, optional): Device on which the models are loaded.
n_q (int, optional): Number of quantizers to use within the compression model.
"""
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
assert bw in [1.5, 3.0, 6.0], f"bandwidth {bw} not available"
if n_q is not None:
assert n_q in [2, 4, 8]
assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \
f"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}"
n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]
codec_model = CompressionSolver.model_from_checkpoint(
'//pretrained/facebook/encodec_24khz', device=device)
codec_model.set_num_codebooks(n_q)
codec_model = codec_model.to(device)
path = 'facebook/multiband-diffusion'
filename = f'mbd_comp_{n_q}.pt'
models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)
DPs = []
for i in range(len(models)):
schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)
DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))
return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)
@torch.no_grad()
def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:
"""Get the conditioning (i.e. latent reprentatios of the compression model) from a waveform.
Args:
wav (torch.Tensor): The audio that we want to extract the conditioning from
sample_rate (int): sample rate of the audio"""
if sample_rate != self.sample_rate:
wav = julius.resample_frac(wav, sample_rate, self.sample_rate)
codes, scale = self.codec_model.encode(wav)
assert scale is None, "Scaled compression models not supported."
emb = self.get_emb(codes)
return emb
@torch.no_grad()
def get_emb(self, codes: torch.Tensor):
"""Get latent representation from the discrete codes
Argrs:
codes (torch.Tensor): discrete tokens"""
emb = self.codec_model.decode_latent(codes)
return emb
def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,
step_list: tp.Optional[tp.List[int]] = None):
"""Generate Wavform audio from the latent embeddings of the compression model
Args:
emb (torch.Tensor): Conditioning embeddinds
size (none torch.Size): size of the output
if None this is computed from the typical upsampling of the model
step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.
"""
if size is None:
upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)
size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])
assert size[0] == emb.size(0)
out = torch.zeros(size).to(self.device)
for DP in self.DPs:
out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))
return out
def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):
"""match the eq to the encodec output by matching the standard deviation of some frequency bands
Args:
wav (torch.Tensor): audio to equalize
ref (torch.Tensor):refenrence audio from which we match the spectrogram.
n_bands (int): number of bands of the eq
strictness (float): how strict the the matching. 0 is no matching, 1 is exact matching.
"""
split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)
bands = split(wav)
bands_ref = split(ref)
out = torch.zeros_like(ref)
for i in range(n_bands):
out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness
return out
def regenerate(self, wav: torch.Tensor, sample_rate: int):
"""Regenerate a wavform through compression and diffusion regeneration.
Args:
wav (torch.Tensor): Original 'ground truth' audio
sample_rate (int): sample rate of the input (and output) wav
"""
if sample_rate != self.codec_model.sample_rate:
wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)
emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)
size = wav.size()
out = self.generate(emb, size=size)
if sample_rate != self.codec_model.sample_rate:
out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)
return out
def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):
"""Generate Waveform audio with diffusion from the discrete codes.
Args:
tokens (torch.Tensor): discrete codes
n_bands (int): bands for the eq matching.
"""
wav_encodec = self.codec_model.decode(tokens)
condition = self.get_emb(tokens)
wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())
return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)
| audiocraft-main | audiocraft/models/multibanddiffusion.py |
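# Usage sketch (not part of the original file): round-tripping audio through
# MultiBandDiffusion as defined above. The random input stands in for real
# audio and is purely illustrative; checkpoints are downloaded on first use.
import torch
from audiocraft.models.multibanddiffusion import MultiBandDiffusion

mbd = MultiBandDiffusion.get_mbd_24khz(bw=3.0)
wav = torch.randn(1, 1, 24000).to(mbd.device)  # one second of (fake) audio at 24 kHz
out = mbd.regenerate(wav, sample_rate=24000)   # compress, then diffuse back to audio
print(out.shape)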
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Main model for using AudioGen. This will combine all the required components
and provide easy access to the generation API.
"""
import typing as tp
import torch
from .encodec import CompressionModel
from .lm import LMModel
from .builders import get_debug_compression_model, get_debug_lm_model
from .loaders import load_compression_model, load_lm_model
from ..data.audio_utils import convert_audio
from ..modules.conditioners import ConditioningAttributes
from ..utils.autocast import TorchAutocast
class AudioGen:
"""AudioGen main model with convenient generation API.
Args:
name (str): name of the model.
compression_model (CompressionModel): Compression model
used to map audio to invertible discrete representations.
lm (LMModel): Language model over discrete representations.
max_duration (float, optional): maximum duration the model can produce,
otherwise, inferred from the training params.
"""
def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,
max_duration: tp.Optional[float] = None):
self.name = name
self.compression_model = compression_model
self.lm = lm
if max_duration is None:
if hasattr(lm, 'cfg'):
max_duration = lm.cfg.dataset.segment_duration # type: ignore
else:
raise ValueError("You must provide max_duration when building directly AudioGen")
assert max_duration is not None
self.max_duration: float = max_duration
self.device = next(iter(lm.parameters())).device
self.generation_params: dict = {}
self.set_generation_params(duration=5) # 5 seconds by default
self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None
if self.device.type == 'cpu':
self.autocast = TorchAutocast(enabled=False)
else:
self.autocast = TorchAutocast(
enabled=True, device_type=self.device.type, dtype=torch.float16)
@property
def frame_rate(self) -> float:
"""Roughly the number of AR steps per seconds."""
return self.compression_model.frame_rate
@property
def sample_rate(self) -> int:
"""Sample rate of the generated audio."""
return self.compression_model.sample_rate
@property
def audio_channels(self) -> int:
"""Audio channels of the generated audio."""
return self.compression_model.channels
@staticmethod
def get_pretrained(name: str = 'facebook/audiogen-medium', device=None):
"""Return pretrained model, we provide a single model for now:
- facebook/audiogen-medium (1.5B), text to sound,
# see: https://huggingface.co/facebook/audiogen-medium
"""
if device is None:
if torch.cuda.device_count():
device = 'cuda'
else:
device = 'cpu'
if name == 'debug':
# used only for unit tests
compression_model = get_debug_compression_model(device, sample_rate=16000)
lm = get_debug_lm_model(device)
return AudioGen(name, compression_model, lm, max_duration=10)
compression_model = load_compression_model(name, device=device)
lm = load_lm_model(name, device=device)
assert 'self_wav' not in lm.condition_provider.conditioners, \
"AudioGen do not support waveform conditioning for now"
return AudioGen(name, compression_model, lm)
def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,
top_p: float = 0.0, temperature: float = 1.0,
duration: float = 10.0, cfg_coef: float = 3.0,
two_step_cfg: bool = False, extend_stride: float = 2):
"""Set the generation parameters for AudioGen.
Args:
use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.
top_k (int, optional): top_k used for sampling. Defaults to 250.
top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.
temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.
duration (float, optional): Duration of the generated waveform. Defaults to 10.0.
cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.
two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,
instead of batching together the two. This has some impact on how things
are padded but seems to have little impact in practice.
extend_stride: when doing extended generation (i.e. more than 10 seconds), by how much
should we extend the audio each time. Larger values will mean less context is
preserved, and shorter values will require extra computations.
"""
assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration."
self.extend_stride = extend_stride
self.duration = duration
self.generation_params = {
'use_sampling': use_sampling,
'temp': temperature,
'top_k': top_k,
'top_p': top_p,
'cfg_coef': cfg_coef,
'two_step_cfg': two_step_cfg,
}
def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):
"""Override the default progress callback."""
self._progress_callback = progress_callback
def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor:
"""Generate samples conditioned on text.
Args:
descriptions (list of str): A list of strings used as text conditioning.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
assert prompt_tokens is None
return self._generate_tokens(attributes, prompt_tokens, progress)
def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,
descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,
progress: bool = False) -> torch.Tensor:
"""Generate samples conditioned on audio prompts.
Args:
prompt (torch.Tensor): A batch of waveforms used for continuation.
Prompt should be [B, C, T], or [C, T] if only one sample is generated.
prompt_sample_rate (int): Sampling rate of the given audio waveforms.
descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
if prompt.dim() == 2:
prompt = prompt[None]
if prompt.dim() != 3:
raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).")
prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)
if descriptions is None:
descriptions = [None] * len(prompt)
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)
assert prompt_tokens is not None
return self._generate_tokens(attributes, prompt_tokens, progress)
@torch.no_grad()
def _prepare_tokens_and_attributes(
self,
descriptions: tp.Sequence[tp.Optional[str]],
prompt: tp.Optional[torch.Tensor],
) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:
"""Prepare model inputs.
Args:
descriptions (list of str): A list of strings used as text conditioning.
prompt (torch.Tensor): A batch of waveforms used for continuation.
"""
attributes = [
ConditioningAttributes(text={'description': description})
for description in descriptions]
if prompt is not None:
if descriptions is not None:
assert len(descriptions) == len(prompt), "Prompt and nb. descriptions don't match"
prompt = prompt.to(self.device)
prompt_tokens, scale = self.compression_model.encode(prompt)
assert scale is None
else:
prompt_tokens = None
return attributes, prompt_tokens
def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],
prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:
"""Generate discrete audio tokens given audio prompt and/or conditions.
Args:
attributes (list of ConditioningAttributes): Conditions used for generation (here text).
prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
Returns:
torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.
"""
total_gen_len = int(self.duration * self.frame_rate)
max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
current_gen_offset: int = 0
def _progress_callback(generated_tokens: int, tokens_to_generate: int):
generated_tokens += current_gen_offset
if self._progress_callback is not None:
# Note that total_gen_len might be quite wrong depending on the
# codebook pattern used, but with delay it is almost accurate.
self._progress_callback(generated_tokens, total_gen_len)
else:
print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r')
if prompt_tokens is not None:
assert max_prompt_len >= prompt_tokens.shape[-1], \
"Prompt is longer than audio to generate"
callback = None
if progress:
callback = _progress_callback
if self.duration <= self.max_duration:
# generate by sampling from LM, simple case.
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=total_gen_len, **self.generation_params)
else:
all_tokens = []
if prompt_tokens is None:
prompt_length = 0
else:
all_tokens.append(prompt_tokens)
prompt_length = prompt_tokens.shape[-1]
stride_tokens = int(self.frame_rate * self.extend_stride)
while current_gen_offset + prompt_length < total_gen_len:
time_offset = current_gen_offset / self.frame_rate
chunk_duration = min(self.duration - time_offset, self.max_duration)
max_gen_len = int(chunk_duration * self.frame_rate)
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=max_gen_len, **self.generation_params)
if prompt_tokens is None:
all_tokens.append(gen_tokens)
else:
all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])
prompt_tokens = gen_tokens[:, :, stride_tokens:]
prompt_length = prompt_tokens.shape[-1]
current_gen_offset += stride_tokens
gen_tokens = torch.cat(all_tokens, dim=-1)
# generate audio
assert gen_tokens.dim() == 3
with torch.no_grad():
gen_audio = self.compression_model.decode(gen_tokens, None)
return gen_audio
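# A minimal usage sketch for the continuation path above: a waveform plus its sample rate
# and an optional text description. It assumes the public AudioCraft API of the AudioGen
# class defined above (`get_pretrained`, `set_generation_params`); 'dog_bark.wav' is a
# hypothetical input file. The helper is illustrative only and is never called.
def _example_generate_continuation():
    import torchaudio
    model = AudioGen.get_pretrained('facebook/audiogen-medium')
    model.set_generation_params(duration=8)  # total output length in seconds, prompt included
    prompt, prompt_sr = torchaudio.load('dog_bark.wav')  # [C, T] waveform (hypothetical file)
    # resampling to the model sample rate and batching are handled inside generate_continuation
    audio = model.generate_continuation(prompt, prompt_sr, descriptions=['dog barking in a park'])
    return audio.shape  # [B, channels, samples] at model.sample_rate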
| audiocraft-main | audiocraft/models/audiogen.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from functools import partial
import logging
import math
import typing as tp
import torch
from torch import nn
from ..utils import utils
from ..modules.streaming import StreamingModule, State
from ..modules.transformer import StreamingTransformer, create_norm_fn
from ..modules.conditioners import (
ConditionFuser,
ClassifierFreeGuidanceDropout,
AttributeDropout,
ConditioningProvider,
ConditioningAttributes,
ConditionType,
)
from ..modules.codebooks_patterns import CodebooksPatternProvider
from ..modules.activations import get_activation_fn
logger = logging.getLogger(__name__)
ConditionTensors = tp.Dict[str, ConditionType]
CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]]
def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
"""LM layer initialization.
    Inspired by xlformers: https://github.com/fairinternal/xlformers
Args:
method (str): Method name for init function. Valid options are:
'gaussian', 'uniform'.
input_dim (int): Input dimension of the initialized module.
init_depth (int, optional): Optional init depth value used to rescale
the standard deviation if defined.
"""
# Compute std
std = 1 / math.sqrt(input_dim)
# Rescale with depth
if init_depth is not None:
std = std / math.sqrt(2 * init_depth)
if method == 'gaussian':
return partial(
torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
)
elif method == 'uniform':
bound = math.sqrt(3) * std # ensure the standard deviation is `std`
return partial(torch.nn.init.uniform_, a=-bound, b=bound)
else:
raise ValueError("Unsupported layer initialization method")
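# A small illustrative sketch of how `get_init_fn` is meant to be used: with `init_depth`
# set, the target std shrinks by sqrt(2 * depth), keeping the residual stream variance
# roughly constant across layers. The helper is illustrative only and is never called.
def _example_get_init_fn_usage():
    layer = nn.Linear(512, 512)
    init_fn = get_init_fn('gaussian', input_dim=512, init_depth=24)
    init_fn(layer.weight)
    # the empirical std should be close to 1 / sqrt(512) / sqrt(2 * 24)
    return layer.weight.std().item()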
def init_layer(m: nn.Module,
method: str,
init_depth: tp.Optional[int] = None,
zero_bias_init: bool = False):
"""Wrapper around ``get_init_fn`` for proper initialization of LM modules.
Args:
m (nn.Module): Module to initialize.
method (str): Method name for the init function.
init_depth (int, optional): Optional init depth value used to rescale
the standard deviation if defined.
zero_bias_init (bool): Whether to initialize the bias to 0 or not.
"""
if isinstance(m, nn.Linear):
init_fn = get_init_fn(method, m.in_features, init_depth=init_depth)
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
weight = m.weight.float()
init_fn(weight)
m.weight.data[:] = weight.half()
else:
init_fn(m.weight)
if zero_bias_init and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Embedding):
init_fn = get_init_fn(method, m.embedding_dim, init_depth=None)
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
weight = m.weight.float()
init_fn(weight)
m.weight.data[:] = weight.half()
else:
init_fn(m.weight)
class ScaledEmbedding(nn.Embedding):
"""Boost learning rate for embeddings (with `scale`).
"""
def __init__(self, *args, lr=None, **kwargs):
super().__init__(*args, **kwargs)
self.lr = lr
def make_optim_group(self):
group = {"params": list(self.parameters())}
if self.lr is not None:
group["lr"] = self.lr
return group
@dataclass
class LMOutput:
# The logits are already re-aligned with the input codes
# hence no extra shift is required, e.g. when computing CE
logits: torch.Tensor # [B, K, T, card]
mask: torch.Tensor # [B, K, T]
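# An illustrative sketch of how an LMOutput is typically consumed: since the logits are
# already re-aligned with the input codes, the loss is a plain cross-entropy restricted to
# positions where `mask` is True (assumed to be a boolean tensor), with no extra shift.
# The helper is illustrative only and is never called.
def _example_lm_output_loss(output: LMOutput, codes: torch.Tensor) -> torch.Tensor:
    # codes: [B, K, T] integer targets; output.logits: [B, K, T, card]; output.mask: [B, K, T]
    valid_logits = output.logits[output.mask]   # [N, card], valid positions only
    valid_targets = codes[output.mask]          # [N]
    return torch.nn.functional.cross_entropy(valid_logits, valid_targets)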
class LMModel(StreamingModule):
"""Transformer-based language model on multiple streams of codes.
Args:
pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving.
        condition_provider (ConditioningProvider): Conditioning provider from metadata.
fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input.
n_q (int): Number of parallel streams to model.
card (int): Cardinality, vocabulary size.
dim (int): Dimension of the transformer encoder.
num_heads (int): Number of heads for the transformer encoder.
hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder.
norm (str): Normalization method.
norm_first (bool): Use pre-norm instead of post-norm.
emb_lr (float, optional): Embedding-specific learning rate.
bias_proj (bool): Use bias for output projections.
weight_init (str, optional): Method for weight initialization.
depthwise_init (str, optional): Method for depthwise weight initialization.
zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros.
cfg_dropout (float): Classifier-free guidance dropout.
cfg_coef (float): Classifier-free guidance coefficient.
attribute_dropout (dict): Attribute dropout probabilities.
two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps.
**kwargs: Additional parameters for the transformer encoder.
"""
def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider,
fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8,
hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False,
emb_lr: tp.Optional[float] = None, bias_proj: bool = True,
weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None,
zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0,
attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False,
**kwargs):
super().__init__()
self.cfg_coef = cfg_coef
self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout)
self.att_dropout = AttributeDropout(p=attribute_dropout)
self.condition_provider = condition_provider
self.fuser = fuser
self.card = card
embed_dim = self.card + 1
self.n_q = n_q
self.dim = dim
self.pattern_provider = pattern_provider
self.two_step_cfg = two_step_cfg
self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)])
if 'activation' in kwargs:
kwargs['activation'] = get_activation_fn(kwargs['activation'])
self.transformer = StreamingTransformer(
d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim),
norm=norm, norm_first=norm_first, **kwargs)
self.out_norm: tp.Optional[nn.Module] = None
if norm_first:
self.out_norm = create_norm_fn(norm, dim)
self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)])
self._init_weights(weight_init, depthwise_init, zero_bias_init)
self._fsdp: tp.Optional[nn.Module]
self.__dict__['_fsdp'] = None
def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool):
"""Initialization of the transformer module weights.
Args:
weight_init (str, optional): Weight initialization strategy. See ``get_init_fn`` for valid options.
depthwise_init (str, optional): Depthwise initialization strategy. The following options are valid:
'current' where the depth corresponds to the current layer index or 'global' where the total number
of layer is used as depth. If not set, no depthwise initialization strategy is used.
zero_bias_init (bool): Whether to initialize bias to zero or not.
"""
assert depthwise_init is None or depthwise_init in ['current', 'global']
assert depthwise_init is None or weight_init is not None, \
"If 'depthwise_init' is defined, a 'weight_init' method should be provided."
assert not zero_bias_init or weight_init is not None, \
"If 'zero_bias_init', a 'weight_init' method should be provided"
if weight_init is None:
return
for emb_layer in self.emb:
init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
for layer_idx, tr_layer in enumerate(self.transformer.layers):
depth = None
if depthwise_init == 'current':
depth = layer_idx + 1
elif depthwise_init == 'global':
depth = len(self.transformer.layers)
init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init)
tr_layer.apply(init_fn)
for linear in self.linears:
init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
@property
def special_token_id(self) -> int:
return self.card
@property
def num_codebooks(self) -> int:
return self.n_q
def forward(self, sequence: torch.Tensor,
conditions: tp.List[ConditioningAttributes],
condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor:
"""Apply language model on sequence and conditions.
        Given a sequence tensor of shape [B, K, S], with K the number of codebooks and
        S the sequence steps, return the logits with shape [B, K, S, card].
        Args:
            sequence (torch.Tensor): Indices of the codes to model, of shape [B, K, S].
conditions (list of ConditioningAttributes): Conditions to use when modeling
                the given codes. Note that when evaluating multiple times with the same conditioning
you should pre-compute those and pass them as `condition_tensors`.
condition_tensors (dict[str, ConditionType], optional): Pre-computed conditioning
tensors, see `conditions`.
Returns:
torch.Tensor: Logits.
"""
B, K, S = sequence.shape
assert K == self.num_codebooks, "Sequence shape must match the specified number of codebooks"
input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
if condition_tensors is None:
assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
# apply dropout modules
conditions = self.cfg_dropout(conditions)
conditions = self.att_dropout(conditions)
tokenized = self.condition_provider.tokenize(conditions)
# encode conditions and fuse, both have a streaming cache to not recompute when generating.
condition_tensors = self.condition_provider(tokenized)
else:
assert not conditions, "Shouldn't pass both conditions and condition_tensors."
input_, cross_attention_input = self.fuser(input_, condition_tensors)
out = self.transformer(input_, cross_attention_src=cross_attention_input)
if self.out_norm:
out = self.out_norm(out)
logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card]
# remove the prefix from the model outputs
if len(self.fuser.fuse2cond['prepend']) > 0:
logits = logits[:, :, -S:]
return logits # [B, K, S, card]
def compute_predictions(
self, codes: torch.Tensor,
conditions: tp.List[ConditioningAttributes],
condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput:
"""Given an input tensor of codes [B, K, T] and list of conditions, runs the model
forward using the specified codes interleaving pattern.
Args:
codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size,
K the number of codebooks and T the number of timesteps.
conditions (list of ConditioningAttributes): conditionings to use when modeling
                the given codes. Note that when evaluating multiple times with the same conditioning
you should pre-compute those and pass them as `condition_tensors`.
condition_tensors (dict[str, ConditionType], optional): pre-computed conditioning
tensors, see `conditions`.
Returns:
LMOutput: Language model outputs
logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes,
i.e. the first item corresponds to logits to predict the first code, meaning that
no additional shifting of codes and logits is required.
mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions.
Given the specified interleaving strategies, parts of the logits and codes should
not be considered as valid predictions because of invalid context.
"""
B, K, T = codes.shape
codes = codes.contiguous()
# map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens
pattern = self.pattern_provider.get_pattern(T)
sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence(
codes, self.special_token_id, keep_only_valid_steps=True
)
# apply model on pattern sequence
model = self if self._fsdp is None else self._fsdp
logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card]
# map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card]
# and provide the corresponding mask over invalid positions of tokens
logits = logits.permute(0, 3, 1, 2) # [B, card, K, S]
# note: we use nans as special token to make it obvious if we feed unexpected logits
logits, logits_indexes, logits_mask = pattern.revert_pattern_logits(
logits, float('nan'), keep_only_valid_steps=True
)
logits = logits.permute(0, 2, 3, 1) # [B, K, T, card]
logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T]
return LMOutput(logits, logits_mask)
def _sample_next_token(self,
sequence: torch.Tensor,
cfg_conditions: CFGConditions,
unconditional_state: State,
use_sampling: bool = False,
temp: float = 1.0,
top_k: int = 0,
top_p: float = 0.0,
cfg_coef: tp.Optional[float] = None) -> torch.Tensor:
"""Sample next token from the model given a sequence and a set of conditions. The model supports
multiple sampling strategies (greedy sampling, softmax, top-k, top-p...).
Args:
sequence (torch.Tensor): Current sequence of shape [B, K, S]
with K corresponding to the number of codebooks and S the number of sequence steps.
S = 1 in streaming mode, except for the first step that contains a bigger prompt.
            cfg_conditions (CFGConditions): Set of conditions. If CFG is used,
should be twice the batch size, being the concatenation of the conditions + null conditions.
use_sampling (bool): Whether to use a sampling strategy or not.
temp (float): Sampling temperature.
top_k (int): K for "top-k" sampling.
top_p (float): P for "top-p" sampling.
            cfg_coef (float, optional): Classifier-free guidance coefficient.
Returns:
next_token (torch.Tensor): Next token tensor of shape [B, K, 1].
"""
B = sequence.shape[0]
cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
model = self if self._fsdp is None else self._fsdp
if self.two_step_cfg and cfg_conditions != {}:
assert isinstance(cfg_conditions, tuple), type(cfg_conditions)
condition_tensors, null_condition_tensors = cfg_conditions
cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors)
state = self.get_streaming_state()
self.set_streaming_state(unconditional_state)
uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors)
unconditional_state.update(self.get_streaming_state())
self.set_streaming_state(state)
            logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
else:
assert isinstance(cfg_conditions, dict)
condition_tensors = cfg_conditions
if condition_tensors:
# Preparing for CFG, predicting both conditional and unconditional logits.
sequence = torch.cat([sequence, sequence], dim=0)
all_logits = model(
sequence,
conditions=[], condition_tensors=condition_tensors)
if condition_tensors:
cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card]
logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
else:
logits = all_logits
logits = logits.permute(0, 1, 3, 2) # [B, K, card, T]
        logits = logits[..., -1]  # [B, K, card]
# Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error.
if use_sampling and temp > 0.0:
probs = torch.softmax(logits / temp, dim=-1)
if top_p > 0.0:
next_token = utils.sample_top_p(probs, p=top_p)
elif top_k > 0:
next_token = utils.sample_top_k(probs, k=top_k)
else:
next_token = utils.multinomial(probs, num_samples=1)
else:
next_token = torch.argmax(logits, dim=-1, keepdim=True)
return next_token
@torch.no_grad()
def generate(self,
prompt: tp.Optional[torch.Tensor] = None,
conditions: tp.List[ConditioningAttributes] = [],
num_samples: tp.Optional[int] = None,
max_gen_len: int = 256,
use_sampling: bool = True,
temp: float = 1.0,
top_k: int = 250,
top_p: float = 0.0,
cfg_coef: tp.Optional[float] = None,
two_step_cfg: tp.Optional[bool] = None,
remove_prompts: bool = False,
check: bool = False,
callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor:
"""Generate tokens sampling from the model given a prompt or unconditionally. Generation can
        be performed in a greedy fashion or using sampling with top-K and top-P strategies.
Args:
prompt (torch.Tensor, optional): Prompt tokens of shape [B, K, T].
            conditions (list of ConditioningAttributes): List of conditions used for generation.
num_samples (int, optional): Number of samples to generate when no prompt and no conditions are given.
max_gen_len (int): Maximum generation length.
use_sampling (bool): Whether to use a sampling strategy or not.
temp (float): Sampling temperature.
top_k (int): K for "top-k" sampling.
top_p (float): P for "top-p" sampling.
            cfg_coef (float, optional): Classifier-free guidance coefficient.
two_step_cfg (bool, optional): Whether to perform classifier-free guidance with two steps generation.
remove_prompts (bool): Whether to remove prompts from generation or not.
check (bool): Whether to apply further checks on generated sequence.
callback (Callback, optional): Callback function to report generation progress.
Returns:
torch.Tensor: Generated tokens.
"""
assert not self.training, "generation shouldn't be used in training mode."
first_param = next(iter(self.parameters()))
device = first_param.device
# Checking all input shapes are consistent.
possible_num_samples = []
if num_samples is not None:
possible_num_samples.append(num_samples)
elif prompt is not None:
possible_num_samples.append(prompt.shape[0])
elif conditions:
possible_num_samples.append(len(conditions))
else:
possible_num_samples.append(1)
        assert all(x == possible_num_samples[0] for x in possible_num_samples), "Inconsistent input shapes"
num_samples = possible_num_samples[0]
# below we create set of conditions: one conditional and one unconditional
# to do that we merge the regular condition together with the null condition
# we then do 1 forward pass instead of 2.
# the reason for that is two-fold:
# 1. it is about x2 faster than doing 2 forward passes
# 2. avoid the streaming API treating the 2 passes as part of different time steps
# We also support doing two different passes, in particular to ensure that
# the padding structure is exactly the same between train and test.
# With a batch size of 1, this can be slower though.
cfg_conditions: CFGConditions
two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
if conditions:
null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions)
if two_step_cfg:
cfg_conditions = (
self.condition_provider(self.condition_provider.tokenize(conditions)),
self.condition_provider(self.condition_provider.tokenize(null_conditions)),
)
else:
conditions = conditions + null_conditions
tokenized = self.condition_provider.tokenize(conditions)
cfg_conditions = self.condition_provider(tokenized)
else:
cfg_conditions = {}
if prompt is None:
assert num_samples > 0
prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device)
B, K, T = prompt.shape
start_offset = T
assert start_offset < max_gen_len
pattern = self.pattern_provider.get_pattern(max_gen_len)
# this token is used as default value for codes that are not generated yet
unknown_token = -1
# we generate codes up to the max_gen_len that will be mapped to the pattern sequence
gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device)
# filling the gen_codes with the prompt if needed
gen_codes[..., :start_offset] = prompt
# create the gen_sequence with proper interleaving from the pattern: [B, K, S]
gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id)
# retrieve the start_offset in the sequence:
# it is the first sequence step that contains the `start_offset` timestep
start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset)
assert start_offset_sequence is not None
with self.streaming():
unconditional_state = self.get_streaming_state()
prev_offset = 0
gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S]
for offset in range(start_offset_sequence, gen_sequence_len):
# get current sequence (note that the streaming API is providing the caching over previous offsets)
curr_sequence = gen_sequence[..., prev_offset:offset]
curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1)
if check:
# check coherence between mask and sequence
assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all()
# should never happen as gen_sequence is filled progressively
assert not (curr_sequence == unknown_token).any()
# sample next token from the model, next token shape is [B, K, 1]
next_token = self._sample_next_token(
curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p,
cfg_coef=cfg_coef)
# ensure the tokens that should be masked are properly set to special_token_id
# as the model never output special_token_id
valid_mask = mask[..., offset:offset+1].expand(B, -1, -1)
next_token[~valid_mask] = self.special_token_id
# ensure we don't overwrite prompt tokens, we only write over unknown tokens
# (then mask tokens should be left as is as well, which is correct)
gen_sequence[..., offset:offset+1] = torch.where(
gen_sequence[..., offset:offset+1] == unknown_token,
next_token, gen_sequence[..., offset:offset+1]
)
prev_offset = offset
if callback is not None:
callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence)
unconditional_state.clear()
# ensure sequence has been entirely filled
assert not (gen_sequence == unknown_token).any()
# ensure gen_sequence pattern and mask are matching
# which means the gen_sequence is valid according to the pattern
assert (
gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id)
).all()
# get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps
out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token)
# sanity checks over the returned codes and corresponding masks
assert (out_codes[..., :max_gen_len] != unknown_token).all()
assert (out_mask[..., :max_gen_len] == 1).all()
out_start_offset = start_offset if remove_prompts else 0
out_codes = out_codes[..., out_start_offset:max_gen_len]
# ensure the returned codes are all valid
assert (out_codes >= 0).all() and (out_codes <= self.card).all()
return out_codes
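# A tiny illustrative sketch of the classifier-free guidance mix used in `_sample_next_token`:
# the conditional and unconditional passes are batched together, split with
# `all_logits.split(B, dim=0)`, and then combined as below. `cfg_coef = 1.0` recovers the
# purely conditional logits; larger values move further away from the unconditional ones.
# The helper is illustrative only and is never called.
def _example_cfg_mix(cond_logits: torch.Tensor, uncond_logits: torch.Tensor, cfg_coef: float = 3.0) -> torch.Tensor:
    return uncond_logits + (cond_logits - uncond_logits) * cfg_coef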
| audiocraft-main | audiocraft/models/lm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Compression models or wrapper around existing models.
Also defines the main interface that a model must follow to be usable as an audio tokenizer.
"""
from abc import ABC, abstractmethod
import logging
import math
from pathlib import Path
import typing as tp
import numpy as np
import torch
from torch import nn
from transformers import EncodecModel as HFEncodecModel
from .. import quantization as qt
logger = logging.getLogger()
class CompressionModel(ABC, nn.Module):
"""Base API for all compression model that aim at being used as audio tokenizers
with a language model.
"""
@abstractmethod
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
...
@abstractmethod
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
"""See `EncodecModel.encode`."""
...
@abstractmethod
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
"""See `EncodecModel.decode`."""
...
@abstractmethod
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
...
@property
@abstractmethod
def channels(self) -> int:
...
@property
@abstractmethod
def frame_rate(self) -> float:
...
@property
@abstractmethod
def sample_rate(self) -> int:
...
@property
@abstractmethod
def cardinality(self) -> int:
...
@property
@abstractmethod
def num_codebooks(self) -> int:
...
@property
@abstractmethod
def total_codebooks(self) -> int:
...
@abstractmethod
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer."""
...
@staticmethod
def get_pretrained(
name: str, device: tp.Union[torch.device, str] = 'cpu'
) -> 'CompressionModel':
"""Instantiate a CompressionModel from a given pretrained model.
Args:
name (Path or str): name of the pretrained model. See after.
device (torch.device or str): Device on which the model is loaded.
Pretrained models:
- dac_44khz (https://github.com/descriptinc/descript-audio-codec)
- dac_24khz (same)
- facebook/encodec_24khz (https://huggingface.co/facebook/encodec_24khz)
- facebook/encodec_32khz (https://huggingface.co/facebook/encodec_32khz)
            - your own model on HuggingFace. Export instructions to come...
"""
from . import builders, loaders
model: CompressionModel
if name in ['dac_44khz', 'dac_24khz']:
model_type = name.split('_')[1]
logger.info("Getting pretrained compression model from DAC %s", model_type)
model = DAC(model_type)
elif name in ['debug_compression_model']:
logger.info("Getting pretrained compression model for debug")
model = builders.get_debug_compression_model()
elif Path(name).exists():
            # We assume here that if the path exists, it is in fact an AC checkpoint
# that was exported using `audiocraft.utils.export` functions.
model = loaders.load_compression_model(name, device=device)
else:
logger.info("Getting pretrained compression model from HF %s", name)
hf_model = HFEncodecModel.from_pretrained(name)
model = HFEncodecCompressionModel(hf_model).to(device)
return model.to(device).eval()
class EncodecModel(CompressionModel):
"""Encodec model operating on the raw waveform.
Args:
encoder (nn.Module): Encoder network.
decoder (nn.Module): Decoder network.
quantizer (qt.BaseQuantizer): Quantizer network.
frame_rate (int): Frame rate for the latent representation.
sample_rate (int): Audio sample rate.
channels (int): Number of audio channels.
causal (bool): Whether to use a causal version of the model.
renormalize (bool): Whether to renormalize the audio before running the model.
"""
# we need assignment to override the property in the abstract class,
# I couldn't find a better way...
frame_rate: float = 0
sample_rate: int = 0
channels: int = 0
def __init__(self,
encoder: nn.Module,
decoder: nn.Module,
quantizer: qt.BaseQuantizer,
frame_rate: int,
sample_rate: int,
channels: int,
causal: bool = False,
renormalize: bool = False):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.quantizer = quantizer
self.frame_rate = frame_rate
self.sample_rate = sample_rate
self.channels = channels
self.renormalize = renormalize
self.causal = causal
if self.causal:
# we force disabling here to avoid handling linear overlap of segments
# as supported in original EnCodec codebase.
assert not self.renormalize, 'Causal model does not support renormalize'
@property
def total_codebooks(self):
"""Total number of quantizer codebooks available."""
return self.quantizer.total_codebooks
@property
def num_codebooks(self):
"""Active number of codebooks used by the quantizer."""
return self.quantizer.num_codebooks
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer."""
self.quantizer.set_num_codebooks(n)
@property
def cardinality(self):
"""Cardinality of each codebook."""
return self.quantizer.bins
def preprocess(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
scale: tp.Optional[torch.Tensor]
if self.renormalize:
mono = x.mean(dim=1, keepdim=True)
volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt()
scale = 1e-8 + volume
x = x / scale
scale = scale.view(-1, 1)
else:
scale = None
return x, scale
def postprocess(self,
x: torch.Tensor,
scale: tp.Optional[torch.Tensor] = None) -> torch.Tensor:
if scale is not None:
assert self.renormalize
x = x * scale.view(-1, 1, 1)
return x
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
assert x.dim() == 3
length = x.shape[-1]
x, scale = self.preprocess(x)
emb = self.encoder(x)
q_res = self.quantizer(emb, self.frame_rate)
out = self.decoder(q_res.x)
# remove extra padding added by the encoder and decoder
assert out.shape[-1] >= length, (out.shape[-1], length)
out = out[..., :length]
q_res.x = self.postprocess(out, scale)
return q_res
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
"""Encode the given input tensor to quantized representation along with scale parameter.
Args:
x (torch.Tensor): Float tensor of shape [B, C, T]
Returns:
codes, scale (tuple of torch.Tensor, torch.Tensor): Tuple composed of:
                codes, an int tensor of shape [B, K, T], with K the number of codebooks used and T the number of timesteps.
                scale, a float tensor containing the scale needed for audio renormalization.
"""
assert x.dim() == 3
x, scale = self.preprocess(x)
emb = self.encoder(x)
codes = self.quantizer.encode(emb)
return codes, scale
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
"""Decode the given codes to a reconstructed representation, using the scale to perform
audio denormalization if needed.
Args:
codes (torch.Tensor): Int tensor of shape [B, K, T]
scale (torch.Tensor, optional): Float tensor containing the scale value.
Returns:
out (torch.Tensor): Float tensor of shape [B, C, T], the reconstructed audio.
"""
emb = self.decode_latent(codes)
out = self.decoder(emb)
out = self.postprocess(out, scale)
# out contains extra padding added by the encoder and decoder
return out
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
return self.quantizer.decode(codes)
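# An illustrative encode/decode round-trip built on `CompressionModel.get_pretrained` defined
# above. The checkpoint name is one of the documented options and the audio tensor is random
# placeholder data. The helper is illustrative only and is never called.
def _example_compression_roundtrip():
    model = CompressionModel.get_pretrained('facebook/encodec_32khz')
    wav = torch.randn(1, model.channels, model.sample_rate)  # one second of placeholder audio
    codes, scale = model.encode(wav)    # codes: [B, K, T] discrete tokens
    recon = model.decode(codes, scale)  # recon: [B, C, T'] waveform (may include extra padding)
    return codes.shape, recon.shape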
class DAC(CompressionModel):
def __init__(self, model_type: str = "44khz"):
super().__init__()
try:
import dac.utils
except ImportError:
raise RuntimeError("Could not import dac, make sure it is installed, "
"please run `pip install descript-audio-codec`")
self.model = dac.utils.load_model(model_type=model_type)
self.n_quantizers = self.total_codebooks
self.model.eval()
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
# We don't support training with this.
raise NotImplementedError("Forward and training with DAC not supported.")
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
codes = self.model.encode(x, self.n_quantizers)[1]
return codes, None
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
assert scale is None
z_q = self.decode_latent(codes)
return self.model.decode(z_q)
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
return self.model.quantizer.from_codes(codes)[0]
@property
def channels(self) -> int:
return 1
@property
def frame_rate(self) -> float:
return self.model.sample_rate / self.model.hop_length
@property
def sample_rate(self) -> int:
return self.model.sample_rate
@property
def cardinality(self) -> int:
return self.model.codebook_size
@property
def num_codebooks(self) -> int:
return self.n_quantizers
@property
def total_codebooks(self) -> int:
return self.model.n_codebooks
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer.
"""
assert n >= 1
assert n <= self.total_codebooks
self.n_quantizers = n
class HFEncodecCompressionModel(CompressionModel):
"""Wrapper around HuggingFace Encodec.
"""
def __init__(self, model: HFEncodecModel):
super().__init__()
self.model = model
bws = self.model.config.target_bandwidths
num_codebooks = [
bw * 1000 / (self.frame_rate * math.log2(self.cardinality))
for bw in bws
]
deltas = [nc - int(nc) for nc in num_codebooks]
# Checking we didn't do some bad maths and we indeed have integers!
        assert all(delta <= 1e-3 for delta in deltas), deltas
self.possible_num_codebooks = [int(nc) for nc in num_codebooks]
self.set_num_codebooks(max(self.possible_num_codebooks))
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
# We don't support training with this.
raise NotImplementedError("Forward and training with HF EncodecModel not supported.")
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
bandwidth_index = self.possible_num_codebooks.index(self.num_codebooks)
bandwidth = self.model.config.target_bandwidths[bandwidth_index]
res = self.model.encode(x, None, bandwidth)
assert len(res[0]) == 1
assert len(res[1]) == 1
return res[0][0], res[1][0]
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
if scale is None:
scales = [None] # type: ignore
else:
scales = scale # type: ignore
res = self.model.decode(codes[None], scales)
return res[0]
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
return self.model.quantizer.decode(codes.transpose(0, 1))
@property
def channels(self) -> int:
return self.model.config.audio_channels
@property
def frame_rate(self) -> float:
hop_length = int(np.prod(self.model.config.upsampling_ratios))
return self.sample_rate / hop_length
@property
def sample_rate(self) -> int:
return self.model.config.sampling_rate
@property
def cardinality(self) -> int:
return self.model.config.codebook_size
@property
def num_codebooks(self) -> int:
return self._num_codebooks
@property
def total_codebooks(self) -> int:
return max(self.possible_num_codebooks)
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer.
"""
if n not in self.possible_num_codebooks:
raise ValueError(f"Allowed values for num codebooks: {self.possible_num_codebooks}")
self._num_codebooks = n
| audiocraft-main | audiocraft/models/encodec.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import typing as tp
import torch
from .base import BaseQuantizer, QuantizedResult
from .core_vq import ResidualVectorQuantization
class ResidualVectorQuantizer(BaseQuantizer):
"""Residual Vector Quantizer.
Args:
dimension (int): Dimension of the codebooks.
n_q (int): Number of residual vector quantizers used.
q_dropout (bool): Random quantizer drop out at train time.
bins (int): Codebook size.
decay (float): Decay for exponential moving average over the codebooks.
kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
kmeans_iters (int): Number of iterations used for kmeans initialization.
threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
that have an exponential moving average cluster size less than the specified threshold with
            a randomly selected vector from the current batch.
orthogonal_reg_weight (float): Orthogonal regularization weights.
orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
        orthogonal_reg_max_codes (int, optional): Maximum number of codes to consider
            for orthogonal regularization.
"""
def __init__(
self,
dimension: int = 256,
n_q: int = 8,
q_dropout: bool = False,
bins: int = 1024,
decay: float = 0.99,
kmeans_init: bool = True,
kmeans_iters: int = 10,
threshold_ema_dead_code: int = 2,
orthogonal_reg_weight: float = 0.0,
orthogonal_reg_active_codes_only: bool = False,
orthogonal_reg_max_codes: tp.Optional[int] = None,
):
super().__init__()
self.max_n_q = n_q
self.n_q = n_q
self.q_dropout = q_dropout
self.dimension = dimension
self.bins = bins
self.decay = decay
self.kmeans_init = kmeans_init
self.kmeans_iters = kmeans_iters
self.threshold_ema_dead_code = threshold_ema_dead_code
self.orthogonal_reg_weight = orthogonal_reg_weight
self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
self.vq = ResidualVectorQuantization(
dim=self.dimension,
codebook_size=self.bins,
num_quantizers=self.n_q,
decay=self.decay,
kmeans_init=self.kmeans_init,
kmeans_iters=self.kmeans_iters,
threshold_ema_dead_code=self.threshold_ema_dead_code,
orthogonal_reg_weight=self.orthogonal_reg_weight,
orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,
orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,
channels_last=False
)
def forward(self, x: torch.Tensor, frame_rate: int):
n_q = self.n_q
if self.training and self.q_dropout:
n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())
bw_per_q = math.log2(self.bins) * frame_rate / 1000
quantized, codes, commit_loss = self.vq(x, n_q=n_q)
codes = codes.transpose(0, 1)
# codes is [B, K, T], with T frames, K nb of codebooks.
bw = torch.tensor(n_q * bw_per_q).to(x)
return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""Encode a given input tensor with the specified frame rate at the given bandwidth.
        The RVQ encode method sets the appropriate number of quantizers to use
and returns indices for each quantizer.
"""
n_q = self.n_q
codes = self.vq.encode(x, n_q=n_q)
codes = codes.transpose(0, 1)
# codes is [B, K, T], with T frames, K nb of codebooks.
return codes
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation."""
# codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].
codes = codes.transpose(0, 1)
quantized = self.vq.decode(codes)
return quantized
@property
def total_codebooks(self):
return self.max_n_q
@property
def num_codebooks(self):
return self.n_q
def set_num_codebooks(self, n: int):
assert n > 0 and n <= self.max_n_q
self.n_q = n
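# A worked example of the bandwidth computation in `forward` above: with the default 1024
# bins each codebook emits log2(1024) = 10 bits per frame, so at a 50 Hz frame rate one
# codebook costs 0.5 kbps and n_q = 4 codebooks cost 2.0 kbps. The helper is illustrative
# only and is never called.
def _example_rvq_bandwidth(bins: int = 1024, frame_rate: int = 50, n_q: int = 4) -> float:
    bw_per_q = math.log2(bins) * frame_rate / 1000  # kbps contributed by a single codebook
    return n_q * bw_per_q  # 2.0 kbps for the defaults above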
| audiocraft-main | audiocraft/quantization/vq.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""RVQ."""
# flake8: noqa
from .vq import ResidualVectorQuantizer
from .base import BaseQuantizer, DummyQuantizer, QuantizedResult
| audiocraft-main | audiocraft/quantization/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from einops import rearrange, repeat
import flashy
import torch
from torch import nn, einsum
import torch.nn.functional as F
def exists(val: tp.Optional[tp.Any]) -> bool:
return val is not None
def default(val: tp.Any, d: tp.Any) -> tp.Any:
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, p=2, dim=-1)
def ema_inplace(moving_avg, new, decay: float):
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
return (x + epsilon) / (x.sum() + n_categories * epsilon)
def uniform_init(*shape: int):
t = torch.empty(shape)
nn.init.kaiming_uniform_(t)
return t
def sample_vectors(samples, num: int):
num_samples, device = samples.shape[0], samples.device
if num_samples >= num:
indices = torch.randperm(num_samples, device=device)[:num]
else:
indices = torch.randint(0, num_samples, (num,), device=device)
return samples[indices]
def kmeans(samples, num_clusters: int, num_iters: int = 10):
dim, dtype = samples.shape[-1], samples.dtype
means = sample_vectors(samples, num_clusters)
for _ in range(num_iters):
diffs = rearrange(samples, "n d -> n () d") - rearrange(
means, "c d -> () c d"
)
dists = -(diffs ** 2).sum(dim=-1)
buckets = dists.max(dim=-1).indices
bins = torch.bincount(buckets, minlength=num_clusters)
zero_mask = bins == 0
bins_min_clamped = bins.masked_fill(zero_mask, 1)
new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples)
new_means = new_means / bins_min_clamped[..., None]
means = torch.where(zero_mask[..., None], means, new_means)
return means, bins
def orthogonal_loss_fn(t):
# eq (2) from https://arxiv.org/abs/2112.00384
n = t.shape[0]
normed_codes = l2norm(t)
identity = torch.eye(n, device=t.device)
cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes)
return ((cosine_sim - identity) ** 2).sum() / (n ** 2)
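# A quick sanity sketch for `orthogonal_loss_fn`: for a perfectly orthonormal codebook the
# cosine-similarity matrix equals the identity and the penalty is zero, while a random
# codebook gives a strictly positive value. The helper is illustrative only and is never called.
def _example_orthogonal_loss():
    orthonormal = torch.eye(8)        # rows are mutually orthogonal unit vectors
    random_codes = torch.randn(8, 8)
    return orthogonal_loss_fn(orthonormal).item(), orthogonal_loss_fn(random_codes).item()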
class EuclideanCodebook(nn.Module):
"""Codebook with Euclidean distance.
Args:
dim (int): Dimension.
codebook_size (int): Codebook size.
kmeans_init (bool): Whether to use k-means to initialize the codebooks.
If set to true, run the k-means algorithm on the first training batch and use
the learned centroids as initialization.
kmeans_iters (int): Number of iterations used for k-means algorithm at initialization.
decay (float): Decay for exponential moving average over the codebooks.
epsilon (float): Epsilon value for numerical stability.
threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
that have an exponential moving average cluster size less than the specified threshold with
            a randomly selected vector from the current batch.
"""
def __init__(
self,
dim: int,
codebook_size: int,
        kmeans_init: bool = False,
kmeans_iters: int = 10,
decay: float = 0.8,
epsilon: float = 1e-5,
threshold_ema_dead_code: int = 2,
):
super().__init__()
self.decay = decay
init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
embed = init_fn(codebook_size, dim)
self.codebook_size = codebook_size
self.kmeans_iters = kmeans_iters
self.epsilon = epsilon
self.threshold_ema_dead_code = threshold_ema_dead_code
self.register_buffer("inited", torch.Tensor([not kmeans_init]))
self.register_buffer("cluster_size", torch.zeros(codebook_size))
self.register_buffer("embed", embed)
self.register_buffer("embed_avg", embed.clone())
@torch.jit.ignore
def init_embed_(self, data):
if self.inited:
return
embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
self.embed.data.copy_(embed)
self.embed_avg.data.copy_(embed.clone())
self.cluster_size.data.copy_(cluster_size)
self.inited.data.copy_(torch.Tensor([True]))
# Make sure all buffers across workers are in sync after initialization
flashy.distrib.broadcast_tensors(self.buffers())
def replace_(self, samples, mask):
modified_codebook = torch.where(
mask[..., None], sample_vectors(samples, self.codebook_size), self.embed
)
self.embed.data.copy_(modified_codebook)
def expire_codes_(self, batch_samples):
if self.threshold_ema_dead_code == 0:
return
expired_codes = self.cluster_size < self.threshold_ema_dead_code
if not torch.any(expired_codes):
return
batch_samples = rearrange(batch_samples, "... d -> (...) d")
self.replace_(batch_samples, mask=expired_codes)
flashy.distrib.broadcast_tensors(self.buffers())
def preprocess(self, x):
x = rearrange(x, "... d -> (...) d")
return x
def quantize(self, x):
embed = self.embed.t()
dist = -(
x.pow(2).sum(1, keepdim=True)
- 2 * x @ embed
+ embed.pow(2).sum(0, keepdim=True)
)
embed_ind = dist.max(dim=-1).indices
return embed_ind
def postprocess_emb(self, embed_ind, shape):
return embed_ind.view(*shape[:-1])
def dequantize(self, embed_ind):
quantize = F.embedding(embed_ind, self.embed)
return quantize
def encode(self, x):
shape = x.shape
# pre-process
x = self.preprocess(x)
# quantize
embed_ind = self.quantize(x)
# post-process
embed_ind = self.postprocess_emb(embed_ind, shape)
return embed_ind
def decode(self, embed_ind):
quantize = self.dequantize(embed_ind)
return quantize
def forward(self, x):
shape, dtype = x.shape, x.dtype
x = self.preprocess(x)
self.init_embed_(x)
embed_ind = self.quantize(x)
embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
embed_ind = self.postprocess_emb(embed_ind, shape)
quantize = self.dequantize(embed_ind)
if self.training:
# We do the expiry of code at that point as buffers are in sync
# and all the workers will take the same decision.
self.expire_codes_(x)
ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay)
embed_sum = x.t() @ embed_onehot
ema_inplace(self.embed_avg, embed_sum.t(), self.decay)
cluster_size = (
laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon)
* self.cluster_size.sum()
)
embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
self.embed.data.copy_(embed_normalized)
return quantize, embed_ind
class VectorQuantization(nn.Module):
"""Vector quantization implementation.
Currently supports only euclidean distance.
Args:
dim (int): Dimension
codebook_size (int): Codebook size
codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
decay (float): Decay for exponential moving average over the codebooks.
epsilon (float): Epsilon value for numerical stability.
kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
kmeans_iters (int): Number of iterations used for kmeans initialization.
channels_last (bool): Channels are the last dimension in the input tensors.
commitment_weight (float): Weight for commitment loss.
orthogonal_reg_weight (float): Orthogonal regularization weights.
orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
orthogonal_reg_max_codes (optional int): Maximum number of codes to consider
for orthogonal regularization.
threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
that have an exponential moving average cluster size less than the specified threshold with
            a randomly selected vector from the current batch.
"""
def __init__(
self,
dim: int,
codebook_size: int,
codebook_dim: tp.Optional[int] = None,
decay: float = 0.8,
epsilon: float = 1e-5,
kmeans_init: bool = False,
kmeans_iters: int = 10,
threshold_ema_dead_code: int = 2,
channels_last: bool = False,
commitment_weight: float = 1.,
orthogonal_reg_weight: float = 0.0,
orthogonal_reg_active_codes_only: bool = False,
orthogonal_reg_max_codes: tp.Optional[int] = None,
):
super().__init__()
_codebook_dim: int = default(codebook_dim, dim)
requires_projection = _codebook_dim != dim
self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity())
self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity())
self.epsilon = epsilon
self.commitment_weight = commitment_weight
self.orthogonal_reg_weight = orthogonal_reg_weight
self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size,
kmeans_init=kmeans_init, kmeans_iters=kmeans_iters,
decay=decay, epsilon=epsilon,
threshold_ema_dead_code=threshold_ema_dead_code)
self.codebook_size = codebook_size
self.channels_last = channels_last
@property
def codebook(self):
return self._codebook.embed
@property
def inited(self):
return self._codebook.inited
def _preprocess(self, x):
if not self.channels_last:
x = rearrange(x, "b d n -> b n d")
return x
def _postprocess(self, quantize):
if not self.channels_last:
quantize = rearrange(quantize, "b n d -> b d n")
return quantize
def encode(self, x):
x = self._preprocess(x)
x = self.project_in(x)
embed_in = self._codebook.encode(x)
return embed_in
def decode(self, embed_ind):
quantize = self._codebook.decode(embed_ind)
quantize = self.project_out(quantize)
quantize = self._postprocess(quantize)
return quantize
def forward(self, x):
device = x.device
x = self._preprocess(x)
x = self.project_in(x)
quantize, embed_ind = self._codebook(x)
if self.training:
quantize = x + (quantize - x).detach()
loss = torch.tensor([0.0], device=device, requires_grad=self.training)
if self.training:
if self.commitment_weight > 0:
commit_loss = F.mse_loss(quantize.detach(), x)
loss = loss + commit_loss * self.commitment_weight
if self.orthogonal_reg_weight > 0:
codebook = self.codebook
if self.orthogonal_reg_active_codes_only:
# only calculate orthogonal loss for the activated codes for this batch
unique_code_ids = torch.unique(embed_ind)
codebook = codebook[unique_code_ids]
num_codes = codebook.shape[0]
if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes]
codebook = codebook[rand_ids]
orthogonal_reg_loss = orthogonal_loss_fn(codebook)
loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
quantize = self.project_out(quantize)
quantize = self._postprocess(quantize)
return quantize, embed_ind, loss
class ResidualVectorQuantization(nn.Module):
"""Residual vector quantization implementation.
Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
"""
def __init__(self, *, num_quantizers, **kwargs):
super().__init__()
self.layers = nn.ModuleList(
[VectorQuantization(**kwargs) for _ in range(num_quantizers)]
)
def forward(self, x, n_q: tp.Optional[int] = None):
quantized_out = 0.0
residual = x
all_losses = []
all_indices = []
n_q = n_q or len(self.layers)
for i, layer in enumerate(self.layers[:n_q]):
quantized, indices, loss = layer(residual)
residual = residual - quantized
quantized_out = quantized_out + quantized
all_indices.append(indices)
all_losses.append(loss)
out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
return quantized_out, out_indices, out_losses
def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor:
residual = x
all_indices = []
n_q = n_q or len(self.layers)
for layer in self.layers[:n_q]:
indices = layer.encode(residual)
quantized = layer.decode(indices)
residual = residual - quantized
all_indices.append(indices)
out_indices = torch.stack(all_indices)
return out_indices
def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
quantized_out = torch.tensor(0.0, device=q_indices.device)
for i, indices in enumerate(q_indices):
layer = self.layers[i]
quantized = layer.decode(indices)
quantized_out = quantized_out + quantized
return quantized_out
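# A minimal sketch of the residual scheme above: each layer quantizes what the previous
# layers failed to capture, so reconstruction improves as more codebooks are used. The
# codebooks here are randomly initialized (no k-means, no training), so this only
# demonstrates shapes and data flow. The helper is illustrative only and is never called.
def _example_residual_vq():
    rvq = ResidualVectorQuantization(dim=8, codebook_size=32, num_quantizers=4)
    x = torch.randn(2, 8, 16)     # [B, D, T] latent frames
    codes = rvq.encode(x, n_q=4)  # [n_q, B, T] integer indices
    recon = rvq.decode(codes)     # [B, D, T] quantized approximation of x
    return codes.shape, recon.shape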
| audiocraft-main | audiocraft/quantization/core_vq.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Base class for all quantizers.
"""
from dataclasses import dataclass, field
import typing as tp
import torch
from torch import nn
@dataclass
class QuantizedResult:
x: torch.Tensor
codes: torch.Tensor
bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item.
penalty: tp.Optional[torch.Tensor] = None
metrics: dict = field(default_factory=dict)
class BaseQuantizer(nn.Module):
"""Base class for quantizers.
"""
def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult:
"""
Given input tensor x, returns first the quantized (or approximately quantized)
representation along with quantized codes, bandwidth, and any penalty term for the loss.
Finally, this returns a dict of metrics to update logging etc.
Frame rate must be passed so that the bandwidth is properly computed.
"""
raise NotImplementedError()
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""Encode a given input tensor with the specified sample rate at the given bandwidth."""
raise NotImplementedError()
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation."""
raise NotImplementedError()
@property
def total_codebooks(self):
"""Total number of codebooks."""
raise NotImplementedError()
@property
def num_codebooks(self):
"""Number of active codebooks."""
raise NotImplementedError()
def set_num_codebooks(self, n: int):
"""Set the number of active codebooks."""
raise NotImplementedError()
class DummyQuantizer(BaseQuantizer):
"""Fake quantizer that actually does not perform any quantization.
"""
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor, frame_rate: int):
q = x.unsqueeze(1)
return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x))
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""Encode a given input tensor with the specified sample rate at the given bandwidth.
In the case of the DummyQuantizer, the codes are actually identical
to the input and resulting quantized representation as no quantization is done.
"""
return x.unsqueeze(1)
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation.
In the case of the DummyQuantizer, the codes are actually identical
to the input and resulting quantized representation as no quantization is done.
"""
return codes.squeeze(1)
@property
def total_codebooks(self):
"""Total number of codebooks."""
return 1
@property
def num_codebooks(self):
"""Total number of codebooks."""
return self.total_codebooks
def set_num_codebooks(self, n: int):
"""Set the number of active codebooks."""
raise AttributeError("Cannot override the number of codebooks for the dummy quantizer")
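# A small sketch of the quantizer contract using the DummyQuantizer: codes simply gain a
# codebook dimension of size 1, decoding removes it, and the reported bandwidth assumes
# 32 bits per value. The helper is illustrative only and is never called.
def _example_dummy_quantizer():
    quantizer = DummyQuantizer()
    x = torch.randn(2, 8, 16)             # [B, D, T] latent frames
    result = quantizer(x, frame_rate=50)  # QuantizedResult(x, codes, bandwidth, ...)
    assert torch.equal(quantizer.decode(result.codes), x)
    return result.codes.shape             # [B, 1, D, T]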
| audiocraft-main | audiocraft/quantization/base.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import numpy as np
import torch.nn as nn
from .conv import StreamableConv1d, StreamableConvTranspose1d
from .lstm import StreamableLSTM
class SEANetResnetBlock(nn.Module):
"""Residual block from SEANet model.
Args:
dim (int): Dimension of the input/output.
kernel_sizes (list): List of kernel sizes for the convolutions.
dilations (list): List of dilations for the convolutions.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
norm (str): Normalization method.
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
causal (bool): Whether to use fully causal convolution.
pad_mode (str): Padding mode for the convolutions.
compress (int): Reduced dimensionality in residual branches (from Demucs v3).
true_skip (bool): Whether to use true skip connection or a simple
(streamable) convolution as the skip connection.
"""
def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1],
activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False,
pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True):
super().__init__()
assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations'
act = getattr(nn, activation)
hidden = dim // compress
block = []
for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
in_chs = dim if i == 0 else hidden
out_chs = dim if i == len(kernel_sizes) - 1 else hidden
block += [
act(**activation_params),
StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation,
norm=norm, norm_kwargs=norm_params,
causal=causal, pad_mode=pad_mode),
]
self.block = nn.Sequential(*block)
self.shortcut: nn.Module
if true_skip:
self.shortcut = nn.Identity()
else:
self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params,
causal=causal, pad_mode=pad_mode)
def forward(self, x):
return self.shortcut(x) + self.block(x)
class SEANetEncoder(nn.Module):
"""SEANet encoder.
Args:
channels (int): Audio channels.
dimension (int): Intermediate representation dimension.
n_filters (int): Base width for the model.
n_residual_layers (int): nb of residual layers.
ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of
            upsampling ratios, hence it will use the ratios in the reverse order of the ones specified here,
            which must match the decoder order. We use the decoder order as some models may only employ the decoder.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
norm (str): Normalization method.
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
kernel_size (int): Kernel size for the initial convolution.
        last_kernel_size (int): Kernel size for the last convolution.
residual_kernel_size (int): Kernel size for the residual layers.
dilation_base (int): How much to increase the dilation with each layer.
causal (bool): Whether to use fully causal convolution.
pad_mode (str): Padding mode for the convolutions.
true_skip (bool): Whether to use true skip connection or a simple
(streamable) convolution as the skip connection in the residual network blocks.
compress (int): Reduced dimensionality in residual branches (from Demucs v3).
lstm (int): Number of LSTM layers at the end of the encoder.
disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
For the encoder, it corresponds to the N first blocks.
"""
def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
disable_norm_outer_blocks: int = 0):
super().__init__()
self.channels = channels
self.dimension = dimension
self.n_filters = n_filters
self.ratios = list(reversed(ratios))
del ratios
self.n_residual_layers = n_residual_layers
self.hop_length = np.prod(self.ratios)
self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
self.disable_norm_outer_blocks = disable_norm_outer_blocks
assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
"Number of blocks for which to disable norm is invalid." \
"It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
act = getattr(nn, activation)
mult = 1
model: tp.List[nn.Module] = [
StreamableConv1d(channels, mult * n_filters, kernel_size,
norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
        # Downsample from raw audio scale
for i, ratio in enumerate(self.ratios):
block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm
# Add residual layers
for j in range(n_residual_layers):
model += [
SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1],
dilations=[dilation_base ** j, 1],
norm=block_norm, norm_params=norm_params,
activation=activation, activation_params=activation_params,
causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
# Add downsampling layers
model += [
act(**activation_params),
StreamableConv1d(mult * n_filters, mult * n_filters * 2,
kernel_size=ratio * 2, stride=ratio,
norm=block_norm, norm_kwargs=norm_params,
causal=causal, pad_mode=pad_mode),
]
mult *= 2
if lstm:
model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
model += [
act(**activation_params),
StreamableConv1d(mult * n_filters, dimension, last_kernel_size,
norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
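# Illustrative usage sketch (not part of the original module). With the default
# ratios [8, 5, 4, 2] the encoder hop length is 320, so one latent frame is produced
# per 320 input samples; the sample values below are assumptions for the example.
# >>> import torch
# >>> encoder = SEANetEncoder()  # dimension=128, hop_length=320
# >>> wav = torch.randn(1, 1, 32000)  # 1 second of mono audio at 32 kHz
# >>> encoder(wav).shape
# torch.Size([1, 128, 100])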
class SEANetDecoder(nn.Module):
"""SEANet decoder.
Args:
channels (int): Audio channels.
dimension (int): Intermediate representation dimension.
n_filters (int): Base width for the model.
n_residual_layers (int): nb of residual layers.
ratios (Sequence[int]): kernel size and stride ratios.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
final_activation (str): Final activation function after all convolutions.
final_activation_params (dict): Parameters to provide to the activation function.
norm (str): Normalization method.
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
kernel_size (int): Kernel size for the initial convolution.
        last_kernel_size (int): Kernel size for the last convolution.
residual_kernel_size (int): Kernel size for the residual layers.
dilation_base (int): How much to increase the dilation with each layer.
causal (bool): Whether to use fully causal convolution.
pad_mode (str): Padding mode for the convolutions.
        true_skip (bool): Whether to use true skip connection or a simple
(streamable) convolution as the skip connection in the residual network blocks.
compress (int): Reduced dimensionality in residual branches (from Demucs v3).
        lstm (int): Number of LSTM layers at the beginning of the decoder.
disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
For the decoder, it corresponds to the N last blocks.
trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup.
If equal to 1.0, it means that all the trimming is done at the right.
"""
def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None,
norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0):
super().__init__()
self.dimension = dimension
self.channels = channels
self.n_filters = n_filters
self.ratios = ratios
del ratios
self.n_residual_layers = n_residual_layers
self.hop_length = np.prod(self.ratios)
self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
self.disable_norm_outer_blocks = disable_norm_outer_blocks
assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
"Number of blocks for which to disable norm is invalid." \
"It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
act = getattr(nn, activation)
mult = int(2 ** len(self.ratios))
model: tp.List[nn.Module] = [
StreamableConv1d(dimension, mult * n_filters, kernel_size,
norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
if lstm:
model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
# Upsample to raw audio scale
for i, ratio in enumerate(self.ratios):
block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm
# Add upsampling layers
model += [
act(**activation_params),
StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2,
kernel_size=ratio * 2, stride=ratio,
norm=block_norm, norm_kwargs=norm_params,
causal=causal, trim_right_ratio=trim_right_ratio),
]
# Add residual layers
for j in range(n_residual_layers):
model += [
SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1],
dilations=[dilation_base ** j, 1],
activation=activation, activation_params=activation_params,
norm=block_norm, norm_params=norm_params, causal=causal,
pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
mult //= 2
# Add final layers
model += [
act(**activation_params),
StreamableConv1d(n_filters, channels, last_kernel_size,
norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
# Add optional final activation to decoder (eg. tanh)
if final_activation is not None:
final_act = getattr(nn, final_activation)
final_activation_params = final_activation_params or {}
model += [
final_act(**final_activation_params)
]
self.model = nn.Sequential(*model)
def forward(self, z):
y = self.model(z)
return y
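# Illustrative round-trip sketch (not part of the original module), assuming an encoder
# and a decoder built with the same (default) ratios: the decoder upsamples each latent
# frame back to hop_length samples.
# >>> import torch
# >>> encoder, decoder = SEANetEncoder(), SEANetDecoder()
# >>> z = encoder(torch.randn(1, 1, 32000))
# >>> decoder(z).shape
# torch.Size([1, 1, 32000])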
| audiocraft-main | audiocraft/modules/seanet.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch import Tensor
from typing import Union, Callable
class CustomGLU(nn.Module):
"""Custom Gated Linear Unit activation.
Applies a modified gated linear unit :math:`a * f(b)` where :math:`a` is the first half
of the input matrices, :math:`b` is the second half, and :math:`f` is a provided activation
    function (e.g. sigmoid, swish, etc.).
Args:
activation (nn.Module): The custom activation to apply in the Gated Linear Unit
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions
- Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
Examples::
>>> m = CustomGLU(nn.Sigmoid())
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
def __init__(self, activation: nn.Module, dim: int = -1):
super(CustomGLU, self).__init__()
self.dim = dim
self.activation = activation
def forward(self, x: Tensor):
assert x.shape[self.dim] % 2 == 0 # M = N / 2
a, b = torch.chunk(x, 2, dim=self.dim)
return a * self.activation(b)
class SwiGLU(CustomGLU):
"""SiLU Gated Linear Unit activation.
Applies SiLU Gated Linear Unit :math:`a * SiLU(b)` where :math:`a` is
the first half of the input matrices, :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
"""
def __init__(self, dim: int = -1):
super(SwiGLU, self).__init__(nn.SiLU(), dim)
class GeGLU(CustomGLU):
"""GeLU Gated Linear Unit activation.
Applies GeLU Gated Linear Unit :math:`a * GELU(b)` where :math:`a` is
the first half of the input matrices, :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
"""
def __init__(self, dim: int = -1):
super(GeGLU, self).__init__(nn.GELU(), dim)
class ReGLU(CustomGLU):
"""ReLU Gated Linear Unit activation.
Applies ReLU Gated Linear Unit :math:`a * ReLU(b)` where :math:`a` is
the first half of the input matrices, :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
"""
def __init__(self, dim: int = -1):
super(ReGLU, self).__init__(nn.ReLU(), dim)
def get_activation_fn(
activation: Union[str, Callable[[Tensor], Tensor]]
) -> Union[str, Callable[[Tensor], Tensor]]:
"""Helper function to map an activation string to the activation class.
If the supplied activation is not a string that is recognized, the activation is passed back.
Args:
activation (str, or Callable[[Tensor], Tensor]): Activation to check
"""
if isinstance(activation, str):
if activation == "reglu":
return ReGLU()
elif activation == "geglu":
return GeGLU()
elif activation == "swiglu":
return SwiGLU()
return activation
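# Illustrative sketch (not part of the original module): mapping a string to a gated
# activation and applying it. The gating halves the size of the chosen dimension;
# the tensor shape below is an arbitrary assumption for the example.
# >>> act = get_activation_fn("swiglu")
# >>> act(torch.randn(4, 8)).shape
# torch.Size([4, 4])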
| audiocraft-main | audiocraft/modules/activations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Modules used for building the models."""
# flake8: noqa
from .conv import (
NormConv1d,
NormConv2d,
NormConvTranspose1d,
NormConvTranspose2d,
StreamableConv1d,
StreamableConvTranspose1d,
pad_for_conv1d,
pad1d,
unpad1d,
)
from .lstm import StreamableLSTM
from .seanet import SEANetEncoder, SEANetDecoder
from .transformer import StreamingTransformer | audiocraft-main | audiocraft/modules/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
from dataclasses import dataclass
from functools import lru_cache
import logging
import typing as tp
from abc import ABC, abstractmethod
import torch
LayoutCoord = namedtuple('LayoutCoord', ['t', 'q']) # (timestep, codebook index)
PatternLayout = tp.List[tp.List[LayoutCoord]] # Sequence of coordinates
logger = logging.getLogger(__name__)
@dataclass
class Pattern:
"""Base implementation of a pattern over a sequence with multiple codebooks.
The codebook pattern consists in a layout, defining for each sequence step
the list of coordinates of each codebook timestep in the resulting interleaved sequence.
The first item of the pattern is always an empty list in order to properly insert a special token
to start with. For convenience, we also keep track of ``n_q`` the number of codebooks used for the pattern
and ``timesteps`` the number of timesteps corresponding to the original sequence.
The pattern provides convenient methods to build and revert interleaved sequences from it:
    ``build_pattern_sequence`` maps a given dense input tensor of multi-codebook sequence from [B, K, T]
    to the interleaved sequence of shape [B, K, S] applying the pattern, with B being the batch size,
K being the number of codebooks, T the number of original timesteps and S the number of sequence steps
for the output sequence. The unfilled positions are replaced with a special token and the built sequence
is returned along with a mask indicating valid tokens.
``revert_pattern_sequence`` maps back an interleaved sequence of shape [B, K, S] to the original alignment
of codebooks across timesteps to an output tensor of shape [B, K, T], using again a special token and a mask
to fill and specify invalid positions if needed.
See the dedicated methods for more details.
"""
# Pattern layout, for each sequence step, we have a list of coordinates
# corresponding to the original codebook timestep and position.
# The first list is always an empty list in order to properly insert
# a special token to start with.
layout: PatternLayout
timesteps: int
n_q: int
def __post_init__(self):
assert len(self.layout) > 0
assert self.layout[0] == []
self._validate_layout()
self._build_reverted_sequence_scatter_indexes = lru_cache(100)(self._build_reverted_sequence_scatter_indexes)
self._build_pattern_sequence_scatter_indexes = lru_cache(100)(self._build_pattern_sequence_scatter_indexes)
logger.info("New pattern, time steps: %d, sequence steps: %d", self.timesteps, len(self.layout))
def _validate_layout(self):
"""Runs checks on the layout to ensure a valid pattern is defined.
A pattern is considered invalid if:
- Multiple timesteps for a same codebook are defined in the same sequence step
- The timesteps for a given codebook are not in ascending order as we advance in the sequence
(this would mean that we have future timesteps before past timesteps).
"""
q_timesteps = {q: 0 for q in range(self.n_q)}
for s, seq_coords in enumerate(self.layout):
if len(seq_coords) > 0:
qs = set()
for coord in seq_coords:
qs.add(coord.q)
last_q_timestep = q_timesteps[coord.q]
assert coord.t >= last_q_timestep, \
f"Past timesteps are found in the sequence for codebook = {coord.q} at step {s}"
q_timesteps[coord.q] = coord.t
# each sequence step contains at max 1 coordinate per codebook
assert len(qs) == len(seq_coords), \
f"Multiple entries for a same codebook are found at step {s}"
@property
def num_sequence_steps(self):
return len(self.layout) - 1
@property
def max_delay(self):
max_t_in_seq_coords = 0
for seq_coords in self.layout[1:]:
for coords in seq_coords:
max_t_in_seq_coords = max(max_t_in_seq_coords, coords.t + 1)
return max_t_in_seq_coords - self.timesteps
@property
def valid_layout(self):
valid_step = len(self.layout) - self.max_delay
return self.layout[:valid_step]
def get_sequence_coords_with_timestep(self, t: int, q: tp.Optional[int] = None):
"""Get codebook coordinates in the layout that corresponds to the specified timestep t
and optionally to the codebook q. Coordinates are returned as a tuple with the sequence step
and the actual codebook coordinates.
"""
assert t <= self.timesteps, "provided timesteps is greater than the pattern's number of timesteps"
if q is not None:
assert q <= self.n_q, "provided number of codebooks is greater than the pattern's number of codebooks"
coords = []
for s, seq_codes in enumerate(self.layout):
for code in seq_codes:
if code.t == t and (q is None or code.q == q):
coords.append((s, code))
return coords
def get_steps_with_timestep(self, t: int, q: tp.Optional[int] = None) -> tp.List[int]:
return [step for step, coords in self.get_sequence_coords_with_timestep(t, q)]
def get_first_step_with_timesteps(self, t: int, q: tp.Optional[int] = None) -> tp.Optional[int]:
steps_with_timesteps = self.get_steps_with_timestep(t, q)
return steps_with_timesteps[0] if len(steps_with_timesteps) > 0 else None
def _build_pattern_sequence_scatter_indexes(self, timesteps: int, n_q: int, keep_only_valid_steps: bool,
device: tp.Union[torch.device, str] = 'cpu'):
"""Build scatter indexes corresponding to the pattern, up to the provided sequence_steps.
Args:
            timesteps (int): Maximum number of timesteps to consider.
keep_only_valid_steps (bool): Restrict the pattern layout to match only valid steps.
device (torch.device or str): Device for created tensors.
Returns:
indexes (torch.Tensor): Indexes corresponding to the sequence, of shape [K, S].
mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes, of shape [K, S].
"""
assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
assert timesteps <= self.timesteps, "invalid number of timesteps used to build the sequence from the pattern"
# use the proper layout based on whether we limit ourselves to valid steps only or not,
# note that using the valid_layout will result in a truncated sequence up to the valid steps
ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
# single item indexing being super slow with pytorch vs. numpy, so we use numpy here
indexes = torch.zeros(n_q, len(ref_layout), dtype=torch.long).numpy()
mask = torch.zeros(n_q, len(ref_layout), dtype=torch.bool).numpy()
# fill indexes with last sequence step value that will correspond to our special token
# the last value is n_q * timesteps as we have flattened z and append special token as the last token
# which will correspond to the index: n_q * timesteps
indexes[:] = n_q * timesteps
# iterate over the pattern and fill scattered indexes and mask
for s, sequence_coords in enumerate(ref_layout):
for coords in sequence_coords:
if coords.t < timesteps:
indexes[coords.q, s] = coords.t + coords.q * timesteps
mask[coords.q, s] = 1
indexes = torch.from_numpy(indexes).to(device)
mask = torch.from_numpy(mask).to(device)
return indexes, mask
def build_pattern_sequence(self, z: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
"""Build sequence corresponding to the pattern from the input tensor z.
The sequence is built using up to sequence_steps if specified, and non-pattern
coordinates are filled with the special token.
Args:
z (torch.Tensor): Input tensor of multi-codebooks sequence, of shape [B, K, T].
special_token (int): Special token used to fill non-pattern coordinates in the new sequence.
keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
Steps that are beyond valid steps will be replaced by the special_token in that case.
Returns:
values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, S] with S
corresponding either to the sequence_steps if provided, otherwise to the length of the pattern.
indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, S].
mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, S].
"""
B, K, T = z.shape
indexes, mask = self._build_pattern_sequence_scatter_indexes(
T, K, keep_only_valid_steps=keep_only_valid_steps, device=str(z.device)
)
z = z.view(B, -1)
# we append the special token as the last index of our flattened z tensor
z = torch.cat([z, torch.zeros_like(z[:, :1]) + special_token], dim=1)
values = z[:, indexes.view(-1)]
values = values.view(B, K, indexes.shape[-1])
return values, indexes, mask
def _build_reverted_sequence_scatter_indexes(self, sequence_steps: int, n_q: int,
keep_only_valid_steps: bool = False,
is_model_output: bool = False,
device: tp.Union[torch.device, str] = 'cpu'):
"""Builds scatter indexes required to retrieve the original multi-codebook sequence
from interleaving pattern.
Args:
sequence_steps (int): Sequence steps.
n_q (int): Number of codebooks.
keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
Steps that are beyond valid steps will be replaced by the special_token in that case.
is_model_output (bool): Whether to keep the sequence item corresponding to initial special token or not.
device (torch.device or str): Device for created tensors.
Returns:
indexes (torch.Tensor): Indexes for reconstructing the output, of shape [K, T].
mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
"""
ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
# TODO(jade): Do we want to further truncate to only valid timesteps here as well?
timesteps = self.timesteps
assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
assert sequence_steps <= len(ref_layout), \
f"sequence to revert is longer than the defined pattern: {sequence_steps} > {len(ref_layout)}"
# ensure we take the appropriate indexes to keep the model output from the first special token as well
if is_model_output:
ref_layout = ref_layout[1:]
# single item indexing being super slow with pytorch vs. numpy, so we use numpy here
indexes = torch.zeros(n_q, timesteps, dtype=torch.long).numpy()
mask = torch.zeros(n_q, timesteps, dtype=torch.bool).numpy()
# fill indexes with last sequence step value that will correspond to our special token
indexes[:] = n_q * sequence_steps
for s, sequence_codes in enumerate(ref_layout):
if s < sequence_steps:
for code in sequence_codes:
if code.t < timesteps:
indexes[code.q, code.t] = s + code.q * sequence_steps
mask[code.q, code.t] = 1
indexes = torch.from_numpy(indexes).to(device)
mask = torch.from_numpy(mask).to(device)
return indexes, mask
def revert_pattern_sequence(self, s: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
"""Revert a sequence built from the pattern back to the original multi-codebook sequence without interleaving.
The sequence is reverted using up to timesteps if specified, and non-pattern coordinates
are filled with the special token.
Args:
s (torch.Tensor): Interleaved sequence tensor obtained from the pattern, of shape [B, K, S].
special_token (int or float): Special token used to fill non-pattern coordinates in the new sequence.
Returns:
values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, T] with T
corresponding either to the timesteps if provided, or the total timesteps in pattern otherwise.
indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, T].
mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
"""
B, K, S = s.shape
indexes, mask = self._build_reverted_sequence_scatter_indexes(
S, K, keep_only_valid_steps, is_model_output=False, device=str(s.device)
)
s = s.view(B, -1)
# we append the special token as the last index of our flattened z tensor
s = torch.cat([s, torch.zeros_like(s[:, :1]) + special_token], dim=1)
values = s[:, indexes.view(-1)]
values = values.view(B, K, indexes.shape[-1])
return values, indexes, mask
def revert_pattern_logits(self, logits: torch.Tensor, special_token: float, keep_only_valid_steps: bool = False):
"""Revert model logits obtained on a sequence built from the pattern
back to a tensor matching the original sequence.
This method is similar to ``revert_pattern_sequence`` with the following specificities:
1. It is designed to work with the extra cardinality dimension
2. We return the logits for the first sequence item that matches the special_token and
        whose matching target in the original sequence is the first item of the sequence,
while we skip the last logits as there is no matching target
"""
B, card, K, S = logits.shape
indexes, mask = self._build_reverted_sequence_scatter_indexes(
S, K, keep_only_valid_steps, is_model_output=True, device=logits.device
)
logits = logits.reshape(B, card, -1)
# we append the special token as the last index of our flattened z tensor
logits = torch.cat([logits, torch.zeros_like(logits[:, :, :1]) + special_token], dim=-1) # [B, card, K x S]
values = logits[:, :, indexes.view(-1)]
values = values.view(B, card, K, indexes.shape[-1])
return values, indexes, mask
class CodebooksPatternProvider(ABC):
"""Abstraction around providing pattern for interleaving codebooks.
The CodebooksPatternProvider abstraction allows to implement various strategies to
define interleaving pattern of sequences composed of multiple codebooks. For a given
number of codebooks `n_q`, the pattern provider can generate a specified pattern
corresponding to a sequence of `T` timesteps with `n_q` parallel codebooks. This pattern
can be used to construct a new sequence from the original codes respecting the specified
pattern. The pattern is defined as a list of list of code coordinates, code coordinate
being a tuple with the original timestep and codebook to build the new sequence.
Note that all patterns must start with an empty list that is then used to insert a first
sequence step of special tokens in the newly generated sequence.
Args:
n_q (int): number of codebooks.
cached (bool): if True, patterns for a given length are cached. In general
            that should be true for efficiency reasons, to avoid synchronization points.
"""
def __init__(self, n_q: int, cached: bool = True):
assert n_q > 0
self.n_q = n_q
        if cached:
            # Honor the documented `cached` flag: wrap `get_pattern` in an LRU cache so that
            # patterns for an already seen length are not rebuilt.
            self.get_pattern = lru_cache(100)(self.get_pattern)  # type: ignore
@abstractmethod
def get_pattern(self, timesteps: int) -> Pattern:
"""Builds pattern with specific interleaving between codebooks.
Args:
timesteps (int): Total number of timesteps.
"""
raise NotImplementedError()
class DelayedPatternProvider(CodebooksPatternProvider):
"""Provider for delayed pattern across delayed codebooks.
Codebooks are delayed in the sequence and sequence steps will contain codebooks
from different timesteps.
Example:
Taking timesteps=4 and n_q=3, delays=None, the multi-codebook sequence:
[[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]]
The resulting sequence obtained from the returned pattern is:
[[S, 1, 2, 3, 4],
[S, S, 1, 2, 3],
[S, S, S, 1, 2]]
(with S being a special token)
Args:
n_q (int): Number of codebooks.
delays (list of int, optional): Delay for each of the codebooks.
If delays not defined, each codebook is delayed by 1 compared to the previous one.
flatten_first (int): Flatten the first N timesteps.
empty_initial (int): Prepend with N empty list of coordinates.
"""
def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None,
flatten_first: int = 0, empty_initial: int = 0):
super().__init__(n_q)
if delays is None:
delays = list(range(n_q))
self.delays = delays
self.flatten_first = flatten_first
self.empty_initial = empty_initial
assert len(self.delays) == self.n_q
assert sorted(self.delays) == self.delays
def get_pattern(self, timesteps: int) -> Pattern:
out: PatternLayout = [[]]
max_delay = max(self.delays)
if self.empty_initial:
out += [[] for _ in range(self.empty_initial)]
if self.flatten_first:
for t in range(min(timesteps, self.flatten_first)):
for q in range(self.n_q):
out.append([LayoutCoord(t, q)])
for t in range(self.flatten_first, timesteps + max_delay):
v = []
for q, delay in enumerate(self.delays):
t_for_q = t - delay
if t_for_q >= self.flatten_first:
v.append(LayoutCoord(t_for_q, q))
out.append(v)
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
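# Illustrative sketch (not part of the original module) of building and reverting the
# delayed pattern described above; all values are assumptions for the example.
# With n_q=3 and timesteps=4, the layout has 1 initial empty step + 4 timesteps + 2 delay steps.
# >>> provider = DelayedPatternProvider(n_q=3)
# >>> pattern = provider.get_pattern(timesteps=4)
# >>> codes = torch.arange(1, 13).view(1, 3, 4)  # [B=1, K=3, T=4]
# >>> seq, indexes, mask = pattern.build_pattern_sequence(codes, special_token=0)
# >>> seq.shape
# torch.Size([1, 3, 7])
# >>> reverted, _, _ = pattern.revert_pattern_sequence(seq, special_token=0)
# >>> reverted.shape
# torch.Size([1, 3, 4])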
class ParallelPatternProvider(DelayedPatternProvider):
"""Provider for parallel pattern across codebooks.
This pattern provider is a special case of the delayed pattern with actually no delay,
hence delays=repeat(0, n_q).
Args:
n_q (int): Number of codebooks.
"""
def __init__(self, n_q: int):
super().__init__(n_q, [0] * n_q)
class UnrolledPatternProvider(CodebooksPatternProvider):
"""Provider for unrolling codebooks pattern.
    This pattern provider makes it possible to flatten the codebooks, either fully or only partially,
    while also specifying a delay between the flattened codebook representations, effectively
    unrolling the codebooks in the sequence.
Example:
1. Flattening of the codebooks.
By default, the pattern provider will fully flatten the codebooks such as flattening=range(n_q),
taking n_q = 3 and timesteps = 4:
[[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]]
will result into:
[[S, S, 1, S, S, 2, S, S, 3, S, S, 4],
[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
[1, S, S, 2, S, S, 3, S, S, 4, S, S]]
2. Partial flattening of the codebooks. The ``flattening`` parameter allows to specify the inner step
for each of the codebook, allowing to define which codebook to flatten (or keep in parallel), for example
taking n_q = 3, timesteps = 4 and flattening = [0, 1, 1]:
[[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]]
will result into:
[[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
[1, S, S, 2, S, S, 3, S, S, 4, S, S]]
3. Flattening with delay. The ``delay`` parameter allows to further unroll the sequence of codebooks
allowing to specify the delay per codebook. Note that the delay between codebooks flattened to the
same inner timestep should be coherent. For example, taking n_q = 3, timesteps = 4, flattening = [0, 1, 1]
and delays = [0, 3, 3]:
[[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]]
will result into:
[[S, S, S, 1, S, 2, S, 3, S, 4],
[S, S, S, 1, S, 2, S, 3, S, 4],
[1, 2, 3, S, 4, S, 5, S, 6, S]]
Args:
n_q (int): Number of codebooks.
flattening (list of int, optional): Flattening schema over the codebooks. If not defined,
the codebooks will be flattened to 1 codebook per step, meaning that the sequence will
have n_q extra steps for each timestep.
delays (list of int, optional): Delay for each of the codebooks. If not defined,
no delay is added and therefore will default to [0] * ``n_q``.
Note that two codebooks that will be flattened to the same inner step
should have the same delay, otherwise the pattern is considered as invalid.
"""
FlattenedCodebook = namedtuple('FlattenedCodebook', ['codebooks', 'delay'])
def __init__(self, n_q: int, flattening: tp.Optional[tp.List[int]] = None,
delays: tp.Optional[tp.List[int]] = None):
super().__init__(n_q)
if flattening is None:
flattening = list(range(n_q))
if delays is None:
delays = [0] * n_q
assert len(flattening) == n_q
assert len(delays) == n_q
assert sorted(flattening) == flattening
assert sorted(delays) == delays
self._flattened_codebooks = self._build_flattened_codebooks(delays, flattening)
self.max_delay = max(delays)
def _build_flattened_codebooks(self, delays: tp.List[int], flattening: tp.List[int]):
"""Build a flattened codebooks representation as a dictionary of inner step
and the actual codebook indices corresponding to the flattened codebook. For convenience, we
also store the delay associated to the flattened codebook to avoid maintaining an extra mapping.
"""
flattened_codebooks: dict = {}
for q, (inner_step, delay) in enumerate(zip(flattening, delays)):
if inner_step not in flattened_codebooks:
flat_codebook = UnrolledPatternProvider.FlattenedCodebook(codebooks=[q], delay=delay)
else:
flat_codebook = flattened_codebooks[inner_step]
assert flat_codebook.delay == delay, (
"Delay and flattening between codebooks is inconsistent: ",
"two codebooks flattened to the same position should have the same delay."
)
flat_codebook.codebooks.append(q)
flattened_codebooks[inner_step] = flat_codebook
return flattened_codebooks
@property
def _num_inner_steps(self):
"""Number of inner steps to unroll between timesteps in order to flatten the codebooks.
"""
return max([inner_step for inner_step in self._flattened_codebooks.keys()]) + 1
def num_virtual_steps(self, timesteps: int) -> int:
return timesteps * self._num_inner_steps + 1
def get_pattern(self, timesteps: int) -> Pattern:
"""Builds pattern for delay across codebooks.
Args:
timesteps (int): Total number of timesteps.
"""
# the PatternLayout is built as a tuple of sequence position and list of coordinates
# so that it can be reordered properly given the required delay between codebooks of given timesteps
indexed_out: list = [(-1, [])]
max_timesteps = timesteps + self.max_delay
for t in range(max_timesteps):
# for each timestep, we unroll the flattened codebooks,
# emitting the sequence step with the corresponding delay
for step in range(self._num_inner_steps):
if step in self._flattened_codebooks:
# we have codebooks at this virtual step to emit
step_codebooks = self._flattened_codebooks[step]
t_for_q = t + step_codebooks.delay
coords = [LayoutCoord(t, q) for q in step_codebooks.codebooks]
if t_for_q < max_timesteps and t < max_timesteps:
indexed_out.append((t_for_q, coords))
else:
# there is no codebook in this virtual step so we emit an empty list
indexed_out.append((t, []))
out = [coords for _, coords in sorted(indexed_out)]
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
class VALLEPattern(CodebooksPatternProvider):
"""Almost VALL-E style pattern.
We further allow some delays for the codebooks other than the first one.
Args:
n_q (int): Number of codebooks.
delays (list of int, optional): Delay for each of the codebooks.
            If delays not defined, no extra delay is applied and the codebooks beyond the first are kept in parallel.
"""
def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None):
super().__init__(n_q)
if delays is None:
delays = [0] * (n_q - 1)
self.delays = delays
assert len(self.delays) == self.n_q - 1
assert sorted(self.delays) == self.delays
def get_pattern(self, timesteps: int) -> Pattern:
out: PatternLayout = [[]]
for t in range(timesteps):
out.append([LayoutCoord(t, 0)])
max_delay = max(self.delays)
for t in range(timesteps + max_delay):
v = []
for q, delay in enumerate(self.delays):
t_for_q = t - delay
if t_for_q >= 0:
v.append(LayoutCoord(t_for_q, q + 1))
out.append(v)
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
class MusicLMPattern(CodebooksPatternProvider):
"""Almost MusicLM style pattern. This is equivalent to full flattening
but in a different order.
Args:
n_q (int): Number of codebooks.
group_by (int): Number of codebooks to group together.
"""
def __init__(self, n_q: int, group_by: int = 2):
super().__init__(n_q)
self.group_by = group_by
def get_pattern(self, timesteps: int) -> Pattern:
out: PatternLayout = [[]]
for offset in range(0, self.n_q, self.group_by):
for t in range(timesteps):
for q in range(offset, offset + self.group_by):
out.append([LayoutCoord(t, q)])
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
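# Illustrative sketch (not part of the original module): the MusicLM-style pattern is a
# full flattening, so the layout length is 1 (initial empty step) + n_q * timesteps;
# the numbers below are assumptions for the example.
# >>> provider = MusicLMPattern(n_q=4, group_by=2)
# >>> len(provider.get_pattern(timesteps=5).layout)
# 21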
| audiocraft-main | audiocraft/modules/codebooks_patterns.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Transformer model, with streaming support, xformer attention support
and easy causal attention with a potentially finite receptive field.
See `StreamingTransformer` for more information.
Unlike regular PyTorch Transformer, we make the hard choice that batches are first.
"""
import typing as tp
from einops import rearrange
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.checkpoint import checkpoint as torch_checkpoint
from xformers import ops
from .rope import RotaryEmbedding
from .streaming import StreamingModule
_efficient_attention_backend: str = 'torch'
def set_efficient_attention_backend(backend: str = 'torch'):
# Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster).
global _efficient_attention_backend
    assert backend in ['xformers', 'torch'], f"Unsupported attention backend: {backend}"
_efficient_attention_backend = backend
def _get_attention_time_dimension() -> int:
if _efficient_attention_backend == 'torch':
return 2
else:
return 1
def _is_profiled() -> bool:
# Return true if we are currently running with a xformers profiler activated.
try:
from xformers.profiler import profiler
except ImportError:
return False
return profiler._Profiler._CURRENT_PROFILER is not None
def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module:
"""Create normalization module for transformer encoder layer.
Args:
norm_type (str): Normalization method.
dim (int): Dimension of the normalized layer.
**kwargs (dict): Additional parameters for normalization layer.
Returns:
nn.Module: Normalization module.
"""
if norm_type == 'layer_norm':
return nn.LayerNorm(dim, eps=1e-5, **kwargs)
else:
raise ValueError(f"Unknown norm type: {norm_type}")
def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,
dtype: torch.dtype = torch.float32) -> torch.Tensor:
"""Create sinusoidal positional embedding, with shape `[B, T, C]`.
Args:
positions (torch.Tensor): LongTensor of positions.
dim (int): Dimension of the embedding.
max_period (float): Maximum period of the cosine/sine functions.
dtype (torch.dtype or str): dtype to use to generate the embedding.
Returns:
torch.Tensor: Sinusoidal positional embedding.
"""
# We aim for BTC format
assert dim % 2 == 0
half_dim = dim // 2
positions = positions.to(dtype)
adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)
max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point
phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))
return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)
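# Illustrative sketch (not part of the original module): embedding integer positions in
# BTC layout; the sizes are assumptions for the example.
# >>> positions = torch.arange(10).view(1, -1, 1)  # [B=1, T=10, 1]
# >>> create_sin_embedding(positions, dim=16).shape
# torch.Size([1, 10, 16])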
def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
"""torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers."""
if n_rep == 1:
return x
if _efficient_attention_backend == 'torch':
bs, n_kv_heads, slen, head_dim = x.shape
return (
x[:, :, None, :, :]
.expand(bs, n_kv_heads, n_rep, slen, head_dim)
.reshape(bs, n_kv_heads * n_rep, slen, head_dim)
)
else:
bs, slen, n_kv_heads, head_dim = x.shape
return (
x[:, :, :, None, :]
.expand(bs, slen, n_kv_heads, n_rep, head_dim)
.reshape(bs, slen, n_kv_heads * n_rep, head_dim)
)
class LayerScale(nn.Module):
"""Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
This rescales diagonally the residual outputs close to 0, with a learnt scale.
Args:
channels (int): Number of channels.
init (float): Initial scale.
channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`.
device (torch.device or str, optional): Device on which to initialize the module.
dtype (torch.dtype, optional): dtype to use to initialize the module.
"""
def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True,
device=None, dtype=None):
super().__init__()
self.channel_last = channel_last
self.scale = nn.Parameter(
torch.full((channels,), init,
requires_grad=True, device=device, dtype=dtype))
def forward(self, x: torch.Tensor):
if self.channel_last:
return self.scale * x
else:
return self.scale[:, None] * x
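# Illustrative sketch (not part of the original module): with channel_last=True the learnt
# per-channel scale (initialized to `init`) multiplies the last dimension of the input.
# >>> ls = LayerScale(channels=512, init=1e-4)
# >>> ls(torch.randn(2, 10, 512)).shape
# torch.Size([2, 10, 512])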
class StreamingMultiheadAttention(StreamingModule):
"""Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation.
Args:
embed_dim (int): Dimension to project to.
num_heads (int): Number of heads.
dropout (float): Dropout level.
bias (bool): Use bias in projections.
causal (bool): Causal mask applied automatically.
past_context (int, optional): Receptive field for the causal mask, infinite if None.
custom (bool): Use custom MHA implementation, for testing / benchmarking.
memory_efficient (bool): Use xformers based memory efficient attention.
attention_as_float32 (bool): Perform the attention as float32
(especially important with memory_efficient as autocast won't do this automatically).
rope (`RotaryEmbedding`, optional): Rope embedding to use.
cross_attention: Should be true when used as a cross attention.
All keys and values must be available at once, streaming is only for the queries.
            Cannot be used with `causal` or `rope` (as it wouldn't make sense to
interpret the time steps in the keys relative to those in the queries).
safe_streaming (bool): Bug fix, will go away with xformers update.
qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product.
        kv_repeat (int): If > 1, will repeat keys and values multiple times (must divide num_heads).
This will lead to faster decoding time on A100 or other GPUs with tensorcore.
device (torch.device, optional): Device on which to initialize.
dtype (torch.dtype, optional): dtype to use.
"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True,
causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False,
memory_efficient: bool = False, attention_as_float32: bool = False,
rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False,
safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1,
device=None, dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
if past_context is not None:
assert causal
self.embed_dim = embed_dim
self.causal = causal
self.past_context = past_context
self.memory_efficient = memory_efficient
self.attention_as_float32 = attention_as_float32
self.rope = rope
self.cross_attention = cross_attention
self.safe_streaming = safe_streaming
self.num_heads = num_heads
self.dropout = dropout
self.kv_repeat = kv_repeat
if cross_attention:
assert not causal, "Causal cannot work with cross attention."
assert rope is None, "Rope cannot work with cross attention."
if memory_efficient:
_verify_xformers_memory_efficient_compat()
self.custom = _is_custom(custom, memory_efficient)
if self.custom:
out_dim = embed_dim
assert num_heads % kv_repeat == 0
assert not cross_attention or kv_repeat == 1
num_kv = num_heads // kv_repeat
kv_dim = (embed_dim // num_heads) * num_kv
out_dim += 2 * kv_dim
in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs)
# We try to follow the default PyTorch MHA convention, to easily compare results.
self.in_proj_weight = in_proj.weight
self.in_proj_bias = in_proj.bias
if bias:
self.in_proj_bias.data.zero_() # Following Pytorch convention
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
if bias:
self.out_proj.bias.data.zero_()
else:
assert not qk_layer_norm
assert kv_repeat == 1
self.mha = nn.MultiheadAttention(
embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True,
**factory_kwargs)
self.qk_layer_norm = qk_layer_norm
if qk_layer_norm:
assert self.custom
assert kv_repeat == 1
ln_dim = embed_dim
self.q_layer_norm = nn.LayerNorm(ln_dim)
self.k_layer_norm = nn.LayerNorm(ln_dim)
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
if not self.custom:
# Support compat with regular MHA
keys = [n for n, _ in self.mha.named_parameters()]
for key in keys:
if prefix + key in state_dict:
state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key)
super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype):
# Return a causal mask, accounting for potentially stored past keys/values
# We actually return a bias for the attention score, as this has the same
# convention both in the builtin MHA in Pytorch, and Xformers functions.
time_dim = _get_attention_time_dimension()
if self.memory_efficient:
from xformers.ops import LowerTriangularMask
if current_steps == 1:
# If we only have one step, then we do not need a mask.
return None
elif 'past_keys' in self._streaming_state:
raise RuntimeError("Not supported at the moment")
else:
# Then we can safely use a lower triangular mask
return LowerTriangularMask()
if self._streaming_state:
past_keys = self._streaming_state['past_keys']
past_steps = past_keys.shape[time_dim]
else:
past_steps = 0
queries_pos = torch.arange(
past_steps, current_steps + past_steps, device=device).view(-1, 1)
keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1)
delta = queries_pos - keys_pos
valid = delta >= 0
if self.past_context is not None:
valid &= (delta <= self.past_context)
return torch.where(
valid,
torch.zeros([], device=device, dtype=dtype),
torch.full([], float('-inf'), device=device, dtype=dtype))
def _complete_kv(self, k, v):
time_dim = _get_attention_time_dimension()
if self.cross_attention:
# With cross attention we assume all keys and values
# are already available, and streaming is with respect
# to the queries only.
return k, v
# Complete the key/value pair using the streaming state.
if self._streaming_state:
pk = self._streaming_state['past_keys']
nk = torch.cat([pk, k], dim=time_dim)
if v is k:
nv = nk
else:
pv = self._streaming_state['past_values']
nv = torch.cat([pv, v], dim=time_dim)
else:
nk = k
nv = v
assert nk.shape[time_dim] == nv.shape[time_dim]
offset = 0
if self.past_context is not None:
offset = max(0, nk.shape[time_dim] - self.past_context)
if self._is_streaming:
self._streaming_state['past_keys'] = nk[:, offset:]
if v is not k:
self._streaming_state['past_values'] = nv[:, offset:]
if 'offset' in self._streaming_state:
self._streaming_state['offset'] += offset
else:
self._streaming_state['offset'] = torch.tensor(0)
return nk, nv
def _apply_rope(self, query: torch.Tensor, key: torch.Tensor):
# TODO: fix and verify layout.
assert _efficient_attention_backend == 'xformers', "Rope not supported with torch attn."
# Apply rope embeddings to query and key tensors.
assert self.rope is not None
if 'past_keys' in self._streaming_state:
past_keys_offset = self._streaming_state['past_keys'].shape[1]
else:
past_keys_offset = 0
if 'offset' in self._streaming_state:
past_context_offset = int(self._streaming_state['offset'].item())
else:
past_context_offset = 0
streaming_offset = past_context_offset + past_keys_offset
return self.rope.rotate_qk(query, key, start=streaming_offset)
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
key_padding_mask=None, need_weights=False, attn_mask=None,
average_attn_weights=True, is_causal=False):
assert attn_mask is None
assert not is_causal, ("New param added in torch 2.0.1 not supported, "
"use the causal args in the constructor.")
time_dim = _get_attention_time_dimension()
if time_dim == 2:
layout = "b h t d"
else:
layout = "b t h d"
dtype = query.dtype
if self._is_streaming:
assert self.causal or self.cross_attention, \
"Streaming only available for causal or cross attention"
if self.causal:
# At the moment we specialize only for the self-attention case.
assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value"
assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value"
attn_mask = self._get_mask(query.shape[1], query.device, query.dtype)
if self.custom:
# custom implementation
assert need_weights is False
assert key_padding_mask is None
if self.cross_attention:
                # Different queries, keys, values, we have to split the weights manually
# before applying the linear.
dim = self.in_proj_weight.shape[0] // 3
if self.in_proj_bias is None:
bias_q, bias_k, bias_v = None, None, None
else:
bias_q = self.in_proj_bias[:dim]
bias_k = self.in_proj_bias[dim: 2 * dim]
bias_v = self.in_proj_bias[2 * dim:]
q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q)
                # todo: when streaming, we could actually save k, v and check that the shapes actually match.
k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k)
v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v)
if self.qk_layer_norm is True:
q = self.q_layer_norm(q)
k = self.k_layer_norm(k)
q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]]
else:
if not _is_profiled():
                    # profiling breaks that property somehow.
assert query is key, "specialized implementation"
assert value is key, "specialized implementation"
projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias)
if self.kv_repeat == 1:
if time_dim == 2:
bound_layout = "b h p t d"
else:
bound_layout = "b t p h d"
packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads)
q, k, v = ops.unbind(packed, dim=2)
else:
embed_dim = self.embed_dim
per_head_dim = (embed_dim // self.num_heads)
kv_heads = self.num_heads // self.kv_repeat
q = projected[:, :, :embed_dim]
start = embed_dim
end = start + per_head_dim * kv_heads
k = projected[:, :, start: end]
v = projected[:, :, end:]
q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads)
k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads)
v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads)
if self.qk_layer_norm is True:
assert self.kv_repeat == 1
q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]]
q = self.q_layer_norm(q)
k = self.k_layer_norm(k)
q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]]
if self.rope:
q, k = self._apply_rope(q, k)
k, v = self._complete_kv(k, v)
if self.kv_repeat > 1:
k = expand_repeated_kv(k, self.kv_repeat)
v = expand_repeated_kv(v, self.kv_repeat)
if self.attention_as_float32:
q, k, v = [x.float() for x in [q, k, v]]
if self.memory_efficient:
p = self.dropout if self.training else 0
if _efficient_attention_backend == 'torch':
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v, is_causal=attn_mask is not None, dropout_p=p)
else:
x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p)
else:
# We include the dot product as float32, for consistency
# with the other implementations that include that step
# as part of the attention. Note that when using `autocast`,
# the einsums would be done as bfloat16, but the softmax
                # would be done as float32, so `attention_as_float32` will
# extend a bit the range of operations done in float32,
# although this should make no difference.
q = q / q.shape[-1] ** 0.5
key_layout = layout.replace('t', 'k')
query_layout = layout
if self._is_streaming and self.safe_streaming and q.device.type == 'cuda':
with torch.autocast(device_type=q.device.type, dtype=torch.float32):
pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
else:
pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
if attn_mask is not None:
pre_w = pre_w + attn_mask
w = torch.softmax(pre_w, dim=-1)
w = F.dropout(w, self.dropout, training=self.training).to(v)
# Key and value have the same format.
x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v)
x = x.to(dtype)
x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads)
x = self.out_proj(x)
else:
key, value = self._complete_kv(key, value)
if self.attention_as_float32:
query, key, value = [x.float() for x in [query, key, value]]
x, _ = self.mha(
query, key, value, key_padding_mask,
need_weights, attn_mask, average_attn_weights)
x = x.to(dtype)
return x, None
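# Illustrative self-attention sketch (not part of the original module), assuming xformers is
# importable (the module imports it above) and using the custom causal implementation;
# the dimensions below are assumptions for the example.
# >>> mha = StreamingMultiheadAttention(embed_dim=64, num_heads=4, causal=True, custom=True)
# >>> x = torch.randn(2, 10, 64)  # batch-first: [B, T, C]
# >>> out, _ = mha(x, x, x)
# >>> out.shape
# torch.Size([2, 10, 64])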
class StreamingTransformerLayer(nn.TransformerEncoderLayer):
"""TransformerLayer with Streaming / Causal support.
This also integrates cross_attention, when passing `cross_attention=True`,
rather than having two separate classes like in PyTorch.
Args:
d_model (int): Dimension of the data.
num_heads (int): Number of heads.
dim_feedforward (int): Intermediate dimension of FF module.
dropout (float): Dropout both for MHA and FF.
bias_ff (bool): Use bias for FF.
bias_attn (bool): Use bias for MHA.
causal (bool): Causal mask applied automatically.
past_context (int, optional): Receptive field for the causal mask, infinite if None.
custom (bool): Use custom MHA implementation, for testing / benchmarking.
memory_efficient (bool): Use xformers based memory efficient attention.
attention_as_float32 (bool): Perform the attention as float32
(especially important with memory_efficient as autocast won't do this automatically).
qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention.
qk_layer_norm_cross (bool): Same for the cross attention.
cross_attention (bool): If True, expect to get secondary input for cross-attention.
Cross attention will use the default MHA, as it typically won't require
special treatment.
layer_scale (float, optional): If not None, LayerScale will be used with
the given value as initial scale.
rope (`RotaryEmbedding`, optional): Rope embedding to use.
        attention_dropout (float, optional): If not None, use this value for the attention dropout,
            decoupling it from the dropout used in the FFN.
        kv_repeat (int): If > 1, will repeat keys and values multiple times (must divide num_heads).
This will lead to faster decoding time on A100 or other GPUs with tensorcore.
device (torch.device, optional): Device on which to initialize.
dtype (torch.dtype, optional): dtype to use.
**kwargs: See `nn.TransformerEncoderLayer`.
"""
def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1,
bias_ff: bool = True, bias_attn: bool = True, causal: bool = False,
past_context: tp.Optional[int] = None, custom: bool = False,
memory_efficient: bool = False, attention_as_float32: bool = False,
qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False,
cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None,
kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs):
super().__init__(d_model, num_heads, dim_feedforward, dropout,
device=device, dtype=dtype, batch_first=True, **kwargs)
factory_kwargs = {'device': device, 'dtype': dtype}
# Redefine self_attn to our streaming multi-head attention
attn_kwargs: tp.Dict[str, tp.Any] = {
'embed_dim': d_model,
'num_heads': num_heads,
'dropout': dropout if attention_dropout is None else attention_dropout,
'bias': bias_attn,
'custom': custom,
'memory_efficient': memory_efficient,
'attention_as_float32': attention_as_float32,
}
self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention(
causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm,
kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore
# Redefine feedforward layers to expose bias parameter
self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs)
self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs)
self.layer_scale_1: nn.Module
self.layer_scale_2: nn.Module
if layer_scale is None:
self.layer_scale_1 = nn.Identity()
self.layer_scale_2 = nn.Identity()
else:
self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs)
self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs)
self.cross_attention: tp.Optional[nn.Module] = None
if cross_attention:
self.cross_attention = StreamingMultiheadAttention(
cross_attention=True, qk_layer_norm=qk_layer_norm_cross,
**attn_kwargs, **factory_kwargs)
# Norm and dropout
self.dropout_cross = nn.Dropout(dropout)
# eps value matching that used in PyTorch reference implementation.
self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs)
self.layer_scale_cross: nn.Module
if layer_scale is None:
self.layer_scale_cross = nn.Identity()
else:
self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs)
self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore
self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore
def _cross_attention_block(self, src: torch.Tensor,
cross_attention_src: torch.Tensor) -> torch.Tensor:
assert self.cross_attention is not None
# queries are from src, keys and values from cross_attention_src.
x = self.cross_attention(
src, cross_attention_src, cross_attention_src, need_weights=False)[0]
return self.dropout_cross(x) # type: ignore
def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore
src_key_padding_mask: tp.Optional[torch.Tensor] = None,
cross_attention_src: tp.Optional[torch.Tensor] = None):
if self.cross_attention is None:
assert cross_attention_src is None
else:
assert cross_attention_src is not None
x = src
if self.norm_first:
x = x + self.layer_scale_1(
self._sa_block(self.norm1(x), src_mask, src_key_padding_mask))
if cross_attention_src is not None:
x = x + self.layer_scale_cross(
self._cross_attention_block(
self.norm_cross(x), cross_attention_src))
x = x + self.layer_scale_2(self._ff_block(self.norm2(x)))
else:
x = self.norm1(x + self.layer_scale_1(
self._sa_block(x, src_mask, src_key_padding_mask)))
if cross_attention_src is not None:
x = self.norm_cross(
x + self.layer_scale_cross(
self._cross_attention_block(src, cross_attention_src)))
x = self.norm2(x + self.layer_scale_2(self._ff_block(x)))
return x
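# Illustrative sketch (not part of the original module): a single causal layer keeps the
# batch-first [B, T, C] shape; the dimensions below are assumptions for the example.
# >>> layer = StreamingTransformerLayer(d_model=64, num_heads=4, dim_feedforward=128,
# ...                                   causal=True, custom=True)
# >>> layer(torch.randn(2, 10, 64)).shape
# torch.Size([2, 10, 64])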
class StreamingTransformer(StreamingModule):
"""Transformer with Streaming / Causal support.
Args:
d_model (int): Dimension of the data.
num_heads (int): Number of heads.
dim_feedforward (int): Intermediate dimension of FF module.
dropout (float): Dropout both for MHA and FF.
bias_ff (bool): Use bias for FF.
bias_attn (bool): Use bias for MHA.
causal (bool): Causal mask applied automatically.
past_context (int, optional): Receptive field for the causal mask, infinite if None.
custom (bool): Use custom MHA implementation, for testing / benchmarking.
memory_efficient (bool): Use xformers based memory efficient attention.
attention_as_float32 (bool): Perform the attention as float32
(especially important with memory_efficient as autocast won't do this automatically).
cross_attention (bool): If True, expect to get secondary input for cross-attention.
layer_scale (float, optional): If not None, LayerScale will be used
with the given value as initial scale.
positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope).
max_period (float): Maximum period of the time embedding.
positional_scale (float): Scale of positional embedding, set to 0 to deactivate.
xpos (bool): Apply xpos exponential decay to positional embedding (rope only).
lr (float, optional): learning rate override through the `make_optim_group` API.
weight_decay (float, optional): Weight_decay override through the `make_optim_group` API.
        layer_class (subclass of `StreamingTransformerLayer`): class to use
            to initialize the layers, allowing further customization outside of AudioCraft.
checkpointing (str): Checkpointing strategy to reduce memory usage.
No checkpointing if set to 'none'. Per layer checkpointing using PyTorch
if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice,
            minimal memory usage, but maximal runtime). Finally, `xformers_default` provides
            a policy that exempts some operations from checkpointing, such as
linear layers and attention, providing a middle ground between speed and memory.
device (torch.device, optional): Device on which to initialize.
dtype (torch.dtype, optional): dtype to use.
**kwargs: See `nn.TransformerEncoderLayer`.
"""
def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048,
dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True,
causal: bool = False, past_context: tp.Optional[int] = None,
custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False,
cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1.,
xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None,
layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer,
checkpointing: str = 'none', device=None, dtype=None, **kwargs):
super().__init__()
assert d_model % num_heads == 0
self.positional_embedding = positional_embedding
self.max_period = max_period
self.positional_scale = positional_scale
self.weight_decay = weight_decay
self.lr = lr
assert positional_embedding in ['sin', 'rope', 'sin_rope']
self.rope: tp.Optional[RotaryEmbedding] = None
if self.positional_embedding in ['rope', 'sin_rope']:
assert _is_custom(custom, memory_efficient)
self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period,
xpos=xpos, scale=positional_scale, device=device)
self.checkpointing = checkpointing
assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm']
if self.checkpointing.startswith('xformers'):
_verify_xformers_internal_compat()
self.layers = nn.ModuleList()
for idx in range(num_layers):
self.layers.append(
layer_class(
d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward,
dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn,
causal=causal, past_context=past_context, custom=custom,
memory_efficient=memory_efficient, attention_as_float32=attention_as_float32,
cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope,
device=device, dtype=dtype, **kwargs))
if self.checkpointing != 'none':
for layer in self.layers:
# see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the
# backward hook inside of FSDP...
layer._magma_checkpointed = True # type: ignore
assert layer.layer_drop == 0., "Need further checking" # type: ignore
def _apply_layer(self, layer, *args, **kwargs):
method = self.checkpointing
if method == 'none':
return layer(*args, **kwargs)
elif method == 'torch':
return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs)
elif method.startswith('xformers'):
from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy
if method == 'xformers_default':
# those operations will be saved, and not recomputed.
# According to Francisco we can get smarter policies but this is a good start.
allow_list = [
"xformers.efficient_attention_forward_cutlass.default",
"xformers_flash.flash_fwd.default",
"aten.addmm.default",
"aten.mm.default",
]
elif method == 'xformers_mm':
# those operations will be saved, and not recomputed.
# According to Francisco we can get smarter policies but this is a good start.
allow_list = [
"aten.addmm.default",
"aten.mm.default",
]
else:
raise ValueError(f"xformers checkpointing xformers policy {method} is not known.")
policy_fn = _get_default_policy(allow_list)
return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs)
else:
raise ValueError(f"Checkpointing method {method} is unknown.")
def forward(self, x: torch.Tensor, *args, **kwargs):
B, T, C = x.shape
if 'offsets' in self._streaming_state:
offsets = self._streaming_state['offsets']
else:
offsets = torch.zeros(B, dtype=torch.long, device=x.device)
if self.positional_embedding in ['sin', 'sin_rope']:
positions = torch.arange(T, device=x.device).view(1, -1, 1)
positions = positions + offsets.view(-1, 1, 1)
pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype)
x = x + self.positional_scale * pos_emb
for layer in self.layers:
x = self._apply_layer(layer, x, *args, **kwargs)
if self._is_streaming:
self._streaming_state['offsets'] = offsets + T
return x
def make_optim_group(self):
group = {"params": list(self.parameters())}
if self.lr is not None:
group["lr"] = self.lr
if self.weight_decay is not None:
group["weight_decay"] = self.weight_decay
return group
# special attention related function
def _verify_xformers_memory_efficient_compat():
try:
from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa
except ImportError:
raise ImportError(
"xformers is not installed. Please install it and try again.\n"
"To install on AWS and Azure, run \n"
"FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
"pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n"
"To install on FAIR Cluster, run \n"
"FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
"pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n")
def _verify_xformers_internal_compat():
try:
from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa
except ImportError:
raise ImportError(
"Francisco's fairinternal xformers is not installed. Please install it and try again.\n"
"To install on AWS and Azure, run \n"
"FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
"pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n"
"To install on FAIR Cluster, run \n"
"FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
"pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n")
def _is_custom(custom: bool, memory_efficient: bool):
return custom or memory_efficient
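# Usage sketch (illustrative assumption, not part of the original module): build a
# small StreamingTransformer with the default sinusoidal positional embedding and
# run a single forward pass. All sizes below are arbitrary demo values.
if __name__ == "__main__":
    demo_model = StreamingTransformer(d_model=64, num_heads=4, num_layers=2,
                                      dim_feedforward=128)
    demo_x = torch.randn(2, 16, 64)  # [batch, time, d_model]
    demo_y = demo_model(demo_x)
    print(demo_y.shape)  # torch.Size([2, 16, 64]) -- time and model dims are preserved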
| audiocraft-main | audiocraft/modules/transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from torch import nn
import torch
class XPos(nn.Module):
"""Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1).
This applies an exponential decay to the RoPE rotation matrix.
Args:
dim (int): Embedding dimension.
smoothing (float): Smoothing factor applied to the decay rates.
base_scale (int): Base decay rate, given in terms of scaling time.
device (torch.device, optional): Device on which to initialize the module.
dtype (torch.dtype): dtype to use to generate the embedding.
"""
def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512,
device=None, dtype: torch.dtype = torch.float32):
super().__init__()
assert dim % 2 == 0
assert dtype in [torch.float64, torch.float32]
self.dtype = dtype
self.base_scale = base_scale
half_dim = dim // 2
adim = torch.arange(half_dim, device=device, dtype=dtype)
decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing)
self.register_buffer("decay_rates", decay_rates)
self.decay: tp.Optional[torch.Tensor] = None
def get_decay(self, start: int, end: int):
"""Create complex decay tensor, cache values for fast computation."""
if self.decay is None or end > self.decay.shape[0]:
assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker.
idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype)
power = idx / self.base_scale
scale = self.decay_rates ** power.unsqueeze(-1)
self.decay = torch.polar(scale, torch.zeros_like(scale))
return self.decay[start:end] # [T, C/2]
class RotaryEmbedding(nn.Module):
"""Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864).
Args:
dim (int): Embedding dimension (twice the number of frequencies).
max_period (float): Maximum period of the rotation frequencies.
xpos (bool): Use xPos, applies an exponential decay to rotation matrix.
scale (float): Scale of positional embedding, set to 0 to deactivate.
device (torch.device, optional): Device on which to initialize the module.
dtype (torch.dtype): dtype to use to generate the embedding.
"""
def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False,
scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32):
super().__init__()
assert dim % 2 == 0
self.scale = scale
assert dtype in [torch.float64, torch.float32]
self.dtype = dtype
adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)]
frequencies = 1.0 / (max_period ** (adim / dim))
self.register_buffer("frequencies", frequencies)
self.rotation: tp.Optional[torch.Tensor] = None
self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None
def get_rotation(self, start: int, end: int):
"""Create complex rotation tensor, cache values for fast computation."""
if self.rotation is None or end > self.rotation.shape[0]:
assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker.
idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype)
angles = torch.outer(idx, self.frequencies)
self.rotation = torch.polar(torch.ones_like(angles), angles)
return self.rotation[start:end]
def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False):
"""Apply rope rotation to query or key tensor."""
T = x.shape[1]
rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2)
if self.xpos:
decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2)
else:
decay = 1.0
if invert_decay:
decay = decay ** -1
x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2))
scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale)
x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2)
return x_out.type_as(x)
def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0):
""" Apply rope rotation to both query and key tensors.
Supports streaming mode, in which query and key are not expected to have the same shape.
In streaming mode, key will be of length [P + C] with P the cached past timesteps, but
query will be [C] (typically C == 1).
Args:
query (torch.Tensor): Query to rotate.
key (torch.Tensor): Key to rotate.
start (int): Start index of the sequence for time offset.
"""
query_timesteps = query.shape[1]
key_timesteps = key.shape[1]
streaming_offset = key_timesteps - query_timesteps
query_out = self.rotate(query, start + streaming_offset)
key_out = self.rotate(key, start, invert_decay=True)
return query_out, key_out
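# Usage sketch (illustrative assumption, not part of the original module): rotate a
# query/key pair laid out as [batch, time, heads, head_dim]; shapes are preserved,
# only the content is rotated according to the time position.
if __name__ == "__main__":
    demo_rope = RotaryEmbedding(dim=32)
    demo_q = torch.randn(1, 8, 4, 32)
    demo_k = torch.randn(1, 8, 4, 32)
    demo_q_rot, demo_k_rot = demo_rope.rotate_qk(demo_q, demo_k)
    print(demo_q_rot.shape, demo_k_rot.shape)  # both torch.Size([1, 8, 4, 32])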
| audiocraft-main | audiocraft/modules/rope.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Functions for Noise Schedule, defines diffusion process, reverse process and data processor.
"""
from collections import namedtuple
import random
import typing as tp
import julius
import torch
TrainingItem = namedtuple("TrainingItem", "noisy noise step")
def betas_from_alpha_bar(alpha_bar):
alphas = torch.cat([torch.Tensor([alpha_bar[0]]), alpha_bar[1:]/alpha_bar[:-1]])
return 1 - alphas
class SampleProcessor(torch.nn.Module):
def project_sample(self, x: torch.Tensor):
"""Project the original sample to the 'space' where the diffusion will happen."""
return x
def return_sample(self, z: torch.Tensor):
"""Project back from diffusion space to the actual sample space."""
return z
class MultiBandProcessor(SampleProcessor):
"""
    MultiBand sample processor. The input audio is split across
    frequency bands evenly distributed on the mel scale.
Each band will be rescaled to match the power distribution
of Gaussian noise in that band, using online metrics
computed on the first few samples.
Args:
n_bands (int): Number of mel-bands to split the signal over.
sample_rate (int): Sample rate of the audio.
num_samples (int): Number of samples to use to fit the rescaling
for each band. The processor won't be stable
until it has seen that many samples.
power_std (float or list/tensor): The rescaling factor computed to match the
power of Gaussian noise in each band is taken to
that power, i.e. `1.` means full correction of the energy
            in each band, and values less than `1` mean only partial
correction. Can be used to balance the relative importance
of low vs. high freq in typical audio signals.
"""
def __init__(self, n_bands: int = 8, sample_rate: float = 24_000,
num_samples: int = 10_000, power_std: tp.Union[float, tp.List[float], torch.Tensor] = 1.):
super().__init__()
self.n_bands = n_bands
self.split_bands = julius.SplitBands(sample_rate, n_bands=n_bands)
self.num_samples = num_samples
self.power_std = power_std
if isinstance(power_std, list):
assert len(power_std) == n_bands
power_std = torch.tensor(power_std)
self.register_buffer('counts', torch.zeros(1))
self.register_buffer('sum_x', torch.zeros(n_bands))
self.register_buffer('sum_x2', torch.zeros(n_bands))
self.register_buffer('sum_target_x2', torch.zeros(n_bands))
self.counts: torch.Tensor
self.sum_x: torch.Tensor
self.sum_x2: torch.Tensor
self.sum_target_x2: torch.Tensor
@property
def mean(self):
mean = self.sum_x / self.counts
return mean
@property
def std(self):
std = (self.sum_x2 / self.counts - self.mean**2).clamp(min=0).sqrt()
return std
@property
def target_std(self):
target_std = self.sum_target_x2 / self.counts
return target_std
def project_sample(self, x: torch.Tensor):
assert x.dim() == 3
bands = self.split_bands(x)
if self.counts.item() < self.num_samples:
ref_bands = self.split_bands(torch.randn_like(x))
self.counts += len(x)
self.sum_x += bands.mean(dim=(2, 3)).sum(dim=1)
self.sum_x2 += bands.pow(2).mean(dim=(2, 3)).sum(dim=1)
self.sum_target_x2 += ref_bands.pow(2).mean(dim=(2, 3)).sum(dim=1)
rescale = (self.target_std / self.std.clamp(min=1e-12)) ** self.power_std # same output size
bands = (bands - self.mean.view(-1, 1, 1, 1)) * rescale.view(-1, 1, 1, 1)
return bands.sum(dim=0)
def return_sample(self, x: torch.Tensor):
assert x.dim() == 3
bands = self.split_bands(x)
rescale = (self.std / self.target_std) ** self.power_std
bands = bands * rescale.view(-1, 1, 1, 1) + self.mean.view(-1, 1, 1, 1)
return bands.sum(dim=0)
class NoiseSchedule:
"""Noise schedule for diffusion.
Args:
beta_t0 (float): Variance of the first diffusion step.
beta_t1 (float): Variance of the last diffusion step.
beta_exp (float): Power schedule exponent
        num_steps (int): Number of diffusion steps.
        variance (str): Choice of the sigma value for the denoising equation; one of "beta" or "beta_tilde".
        clip (float): Clipping value for the denoising steps.
        rescale (float): Rescaling value to avoid vanishing signals; unused by default (i.e. 1).
        repartition (str): Shape of the schedule; only the power schedule is supported.
        sample_processor (SampleProcessor): Module that normalizes the data to better match the Gaussian distribution.
noise_scale (float): Scaling factor for the noise
"""
def __init__(self, beta_t0: float = 1e-4, beta_t1: float = 0.02, num_steps: int = 1000, variance: str = 'beta',
clip: float = 5., rescale: float = 1., device='cuda', beta_exp: float = 1,
repartition: str = "power", alpha_sigmoid: dict = {}, n_bands: tp.Optional[int] = None,
sample_processor: SampleProcessor = SampleProcessor(), noise_scale: float = 1.0, **kwargs):
self.beta_t0 = beta_t0
self.beta_t1 = beta_t1
self.variance = variance
self.num_steps = num_steps
self.clip = clip
self.sample_processor = sample_processor
self.rescale = rescale
self.n_bands = n_bands
self.noise_scale = noise_scale
assert n_bands is None
if repartition == "power":
self.betas = torch.linspace(beta_t0 ** (1 / beta_exp), beta_t1 ** (1 / beta_exp), num_steps,
device=device, dtype=torch.float) ** beta_exp
else:
raise RuntimeError('Not implemented')
self.rng = random.Random(1234)
def get_beta(self, step: tp.Union[int, torch.Tensor]):
if self.n_bands is None:
return self.betas[step]
else:
return self.betas[:, step] # [n_bands, len(step)]
def get_initial_noise(self, x: torch.Tensor):
if self.n_bands is None:
return torch.randn_like(x)
return torch.randn((x.size(0), self.n_bands, x.size(2)))
def get_alpha_bar(self, step: tp.Optional[tp.Union[int, torch.Tensor]] = None) -> torch.Tensor:
"""Return 'alpha_bar', either for a given step, or as a tensor with its value for each step."""
if step is None:
            return (1 - self.betas).cumprod(dim=-1)  # works for single and multi bands
if type(step) is int:
return (1 - self.betas[:step + 1]).prod()
else:
return (1 - self.betas).cumprod(dim=0)[step].view(-1, 1, 1)
def get_training_item(self, x: torch.Tensor, tensor_step: bool = False) -> TrainingItem:
"""Create a noisy data item for diffusion model training:
Args:
x (torch.Tensor): clean audio data torch.tensor(bs, 1, T)
            tensor_step (bool): If tensor_step = false, only one step t is sampled,
                the whole batch is diffused to the same step and t is an int.
                If tensor_step = true, t is a tensor of size (x.size(0),) and
                every element of the batch is diffused to an independently sampled step.
"""
step: tp.Union[int, torch.Tensor]
if tensor_step:
bs = x.size(0)
step = torch.randint(0, self.num_steps, size=(bs,), device=x.device)
else:
step = self.rng.randrange(self.num_steps)
alpha_bar = self.get_alpha_bar(step) # [batch_size, n_bands, 1]
x = self.sample_processor.project_sample(x)
noise = torch.randn_like(x)
noisy = (alpha_bar.sqrt() / self.rescale) * x + (1 - alpha_bar).sqrt() * noise * self.noise_scale
return TrainingItem(noisy, noise, step)
def generate(self, model: torch.nn.Module, initial: tp.Optional[torch.Tensor] = None,
condition: tp.Optional[torch.Tensor] = None, return_list: bool = False):
"""Full ddpm reverse process.
Args:
model (nn.Module): Diffusion model.
initial (tensor): Initial Noise.
            condition (tensor): Input conditioning tensor (e.g. an EnCodec compressed representation).
return_list (bool): Whether to return the whole process or only the sampled point.
"""
alpha_bar = self.get_alpha_bar(step=self.num_steps - 1)
current = initial
iterates = [initial]
for step in range(self.num_steps)[::-1]:
with torch.no_grad():
estimate = model(current, step, condition=condition).sample
alpha = 1 - self.betas[step]
previous = (current - (1 - alpha) / (1 - alpha_bar).sqrt() * estimate) / alpha.sqrt()
previous_alpha_bar = self.get_alpha_bar(step=step - 1)
if step == 0:
sigma2 = 0
elif self.variance == 'beta':
sigma2 = 1 - alpha
elif self.variance == 'beta_tilde':
sigma2 = (1 - previous_alpha_bar) / (1 - alpha_bar) * (1 - alpha)
elif self.variance == 'none':
sigma2 = 0
else:
raise ValueError(f'Invalid variance type {self.variance}')
if sigma2 > 0:
previous += sigma2**0.5 * torch.randn_like(previous) * self.noise_scale
if self.clip:
previous = previous.clamp(-self.clip, self.clip)
current = previous
alpha_bar = previous_alpha_bar
if step == 0:
previous *= self.rescale
if return_list:
iterates.append(previous.cpu())
if return_list:
return iterates
else:
return self.sample_processor.return_sample(previous)
def generate_subsampled(self, model: torch.nn.Module, initial: torch.Tensor, step_list: tp.Optional[list] = None,
condition: tp.Optional[torch.Tensor] = None, return_list: bool = False):
"""Reverse process that only goes through Markov chain states in step_list."""
if step_list is None:
step_list = list(range(1000))[::-50] + [0]
alpha_bar = self.get_alpha_bar(step=self.num_steps - 1)
alpha_bars_subsampled = (1 - self.betas).cumprod(dim=0)[list(reversed(step_list))].cpu()
betas_subsampled = betas_from_alpha_bar(alpha_bars_subsampled)
current = initial * self.noise_scale
iterates = [current]
for idx, step in enumerate(step_list[:-1]):
with torch.no_grad():
estimate = model(current, step, condition=condition).sample * self.noise_scale
alpha = 1 - betas_subsampled[-1 - idx]
previous = (current - (1 - alpha) / (1 - alpha_bar).sqrt() * estimate) / alpha.sqrt()
previous_alpha_bar = self.get_alpha_bar(step_list[idx + 1])
if step == step_list[-2]:
sigma2 = 0
previous_alpha_bar = torch.tensor(1.0)
else:
sigma2 = (1 - previous_alpha_bar) / (1 - alpha_bar) * (1 - alpha)
if sigma2 > 0:
previous += sigma2**0.5 * torch.randn_like(previous) * self.noise_scale
if self.clip:
previous = previous.clamp(-self.clip, self.clip)
current = previous
alpha_bar = previous_alpha_bar
if step == 0:
previous *= self.rescale
if return_list:
iterates.append(previous.cpu())
if return_list:
return iterates
else:
return self.sample_processor.return_sample(previous)
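# Usage sketch (illustrative assumption, not part of the original module): draw a
# noisy training example from the schedule on CPU. Running `generate` would
# additionally require a diffusion model whose output exposes a `.sample` attribute.
if __name__ == "__main__":
    demo_schedule = NoiseSchedule(num_steps=50, device='cpu')
    demo_clean = torch.randn(4, 1, 16_000)  # [batch, channels, time]
    demo_item = demo_schedule.get_training_item(demo_clean, tensor_step=True)
    print(demo_item.noisy.shape, demo_item.noise.shape, demo_item.step.shape)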
| audiocraft-main | audiocraft/modules/diffusion_schedule.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import typing as tp
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm, weight_norm
CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
'time_group_norm'])
def apply_parametrization_norm(module: nn.Module, norm: str = 'none'):
assert norm in CONV_NORMALIZATIONS
if norm == 'weight_norm':
return weight_norm(module)
elif norm == 'spectral_norm':
return spectral_norm(module)
else:
        # We already checked that norm is in CONV_NORMALIZATIONS, so any other choice
        # doesn't need reparametrization.
return module
def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs):
"""Return the proper normalization module. If causal is True, this will ensure the returned
    module is causal, or raise an error if the normalization doesn't support causal evaluation.
"""
assert norm in CONV_NORMALIZATIONS
if norm == 'time_group_norm':
if causal:
raise ValueError("GroupNorm doesn't support causal evaluation.")
assert isinstance(module, nn.modules.conv._ConvNd)
return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
else:
return nn.Identity()
def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
padding_total: int = 0) -> int:
"""See `pad_for_conv1d`."""
length = x.shape[-1]
n_frames = (length - kernel_size + padding_total) / stride + 1
ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
return ideal_length - length
def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
"""Pad for a convolution to make sure that the last window is full.
Extra padding is added at the end. This is required to ensure that we can rebuild
an output of the same length, as otherwise, even with padding, some time steps
might get removed.
For instance, with total padding = 4, kernel size = 4, stride = 2:
0 0 1 2 3 4 5 0 0 # (0s are padding)
1 2 3 # (output frames of a convolution, last 0 is never used)
0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding)
1 2 3 4 # once you removed padding, we are missing one time step !
"""
extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
return F.pad(x, (0, extra_padding))
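# Illustrative sketch (an assumption, not part of the original module): with the
# numbers from the docstring above (input length 5, kernel_size=4, stride=2, total
# padding 4), one extra sample is added on the right so the last window is full.
def _demo_conv1d_padding():
    demo_x = torch.randn(1, 1, 5)
    extra = get_extra_padding_for_conv1d(demo_x, kernel_size=4, stride=2, padding_total=4)
    padded = pad_for_conv1d(demo_x, kernel_size=4, stride=2, padding_total=4)
    return extra, padded.shape[-1]  # -> (1, 6)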
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
"""Tiny wrapper around F.pad, just to allow for reflect padding on small input.
    If this is the case, we insert extra 0 padding to the right before the reflection happens.
"""
length = x.shape[-1]
padding_left, padding_right = paddings
assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
if mode == 'reflect':
max_pad = max(padding_left, padding_right)
extra_pad = 0
if length <= max_pad:
extra_pad = max_pad - length + 1
x = F.pad(x, (0, extra_pad))
padded = F.pad(x, paddings, mode, value)
end = padded.shape[-1] - extra_pad
return padded[..., :end]
else:
return F.pad(x, paddings, mode, value)
def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
"""Remove padding from x, handling properly zero padding. Only for 1d!"""
padding_left, padding_right = paddings
assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
assert (padding_left + padding_right) <= x.shape[-1]
end = x.shape[-1] - padding_right
return x[..., padding_left: end]
class NormConv1d(nn.Module):
"""Wrapper around Conv1d and normalization applied to this conv
to provide a uniform interface across normalization approaches.
"""
def __init__(self, *args, causal: bool = False, norm: str = 'none',
norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
super().__init__()
self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
self.norm_type = norm
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class NormConv2d(nn.Module):
"""Wrapper around Conv2d and normalization applied to this conv
to provide a uniform interface across normalization approaches.
"""
def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
super().__init__()
self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm)
self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs)
self.norm_type = norm
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class NormConvTranspose1d(nn.Module):
"""Wrapper around ConvTranspose1d and normalization applied to this conv
to provide a uniform interface across normalization approaches.
"""
def __init__(self, *args, causal: bool = False, norm: str = 'none',
norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
super().__init__()
self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
self.norm_type = norm
def forward(self, x):
x = self.convtr(x)
x = self.norm(x)
return x
class NormConvTranspose2d(nn.Module):
"""Wrapper around ConvTranspose2d and normalization applied to this conv
to provide a uniform interface across normalization approaches.
"""
def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
super().__init__()
self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm)
self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs)
def forward(self, x):
x = self.convtr(x)
x = self.norm(x)
return x
class StreamableConv1d(nn.Module):
"""Conv1d with some builtin handling of asymmetric or causal padding
and normalization.
"""
def __init__(self, in_channels: int, out_channels: int,
kernel_size: int, stride: int = 1, dilation: int = 1,
groups: int = 1, bias: bool = True, causal: bool = False,
norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
pad_mode: str = 'reflect'):
super().__init__()
# warn user on unusual setup between dilation and stride
if stride > 1 and dilation > 1:
warnings.warn("StreamableConv1d has been initialized with stride > 1 and dilation > 1"
f" (kernel_size={kernel_size} stride={stride}, dilation={dilation}).")
self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
dilation=dilation, groups=groups, bias=bias, causal=causal,
norm=norm, norm_kwargs=norm_kwargs)
self.causal = causal
self.pad_mode = pad_mode
def forward(self, x):
B, C, T = x.shape
kernel_size = self.conv.conv.kernel_size[0]
stride = self.conv.conv.stride[0]
dilation = self.conv.conv.dilation[0]
kernel_size = (kernel_size - 1) * dilation + 1 # effective kernel size with dilations
padding_total = kernel_size - stride
extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
if self.causal:
# Left padding for causal
x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
else:
# Asymmetric padding required for odd strides
padding_right = padding_total // 2
padding_left = padding_total - padding_right
x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
return self.conv(x)
class StreamableConvTranspose1d(nn.Module):
"""ConvTranspose1d with some builtin handling of asymmetric or causal padding
and normalization.
"""
def __init__(self, in_channels: int, out_channels: int,
kernel_size: int, stride: int = 1, causal: bool = False,
norm: str = 'none', trim_right_ratio: float = 1.,
norm_kwargs: tp.Dict[str, tp.Any] = {}):
super().__init__()
self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
causal=causal, norm=norm, norm_kwargs=norm_kwargs)
self.causal = causal
self.trim_right_ratio = trim_right_ratio
assert self.causal or self.trim_right_ratio == 1., \
"`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.
def forward(self, x):
kernel_size = self.convtr.convtr.kernel_size[0]
stride = self.convtr.convtr.stride[0]
padding_total = kernel_size - stride
y = self.convtr(x)
# We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
# removed at the very end, when keeping only the right length for the output,
# as removing it here would require also passing the length at the matching layer
# in the encoder.
if self.causal:
# Trim the padding on the right according to the specified ratio
# if trim_right_ratio = 1.0, trim everything from right
padding_right = math.ceil(padding_total * self.trim_right_ratio)
padding_left = padding_total - padding_right
y = unpad1d(y, (padding_left, padding_right))
else:
# Asymmetric padding required for odd strides
padding_right = padding_total // 2
padding_left = padding_total - padding_right
y = unpad1d(y, (padding_left, padding_right))
return y
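# Usage sketch (illustrative assumption, not part of the original module): a causal
# downsampling / upsampling pair built from the streamable convolutions preserves
# the temporal length of the input.
if __name__ == "__main__":
    demo_x = torch.randn(1, 1, 16)
    demo_down = StreamableConv1d(1, 8, kernel_size=4, stride=2, causal=True)
    demo_up = StreamableConvTranspose1d(8, 1, kernel_size=4, stride=2, causal=True)
    demo_y = demo_up(demo_down(demo_x))
    print(demo_x.shape, demo_y.shape)  # both torch.Size([1, 1, 16])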
| audiocraft-main | audiocraft/modules/conv.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
class StreamableLSTM(nn.Module):
"""LSTM without worrying about the hidden state, nor the layout of the data.
    Expects input in convolutional layout [B, C, T].
"""
def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
super().__init__()
self.skip = skip
self.lstm = nn.LSTM(dimension, dimension, num_layers)
def forward(self, x):
x = x.permute(2, 0, 1)
y, _ = self.lstm(x)
if self.skip:
y = y + x
y = y.permute(1, 2, 0)
return y
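# Usage sketch (illustrative assumption, not part of the original module): the
# module consumes and returns the convolutional layout [batch, channels, time].
if __name__ == "__main__":
    import torch
    demo_lstm = StreamableLSTM(dimension=8)
    demo_x = torch.randn(2, 8, 32)
    print(demo_lstm(demo_x).shape)  # torch.Size([2, 8, 32])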
| audiocraft-main | audiocraft/modules/lstm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Streaming module API that should be implemented by all Streaming components,
"""
from contextlib import contextmanager
import typing as tp
from torch import nn
import torch
State = tp.Dict[str, torch.Tensor]
class StreamingModule(nn.Module):
"""Common API for streaming components.
Each streaming component has a streaming state, which is just a dict[str, Tensor].
By convention, the first dim of each tensor must be the batch size.
Don't use dots in the key names, as this would clash with submodules
(like in state_dict).
If `self._is_streaming` is True, the component should use and remember
the proper state inside `self._streaming_state`.
To set a streaming component in streaming state, use
with module.streaming():
...
This will automatically reset the streaming state when exiting the context manager.
This also automatically propagates to all streaming children module.
    Some modules might also implement the `StreamingModule.flush` method, although
    this one is trickier, as all parent modules must be StreamingModule and implement
    it as well for it to work properly. See `StreamingSequential` below.
"""
def __init__(self) -> None:
super().__init__()
self._streaming_state: State = {}
self._is_streaming = False
def _apply_named_streaming(self, fn: tp.Any):
for name, module in self.named_modules():
if isinstance(module, StreamingModule):
fn(name, module)
def _set_streaming(self, streaming: bool):
def _set_streaming(name, module):
module._is_streaming = streaming
self._apply_named_streaming(_set_streaming)
@contextmanager
def streaming(self):
"""Context manager to enter streaming mode. Reset streaming state on exit."""
self._set_streaming(True)
try:
yield
finally:
self._set_streaming(False)
self.reset_streaming()
def reset_streaming(self):
"""Reset the streaming state."""
def _reset(name: str, module: StreamingModule):
module._streaming_state.clear()
self._apply_named_streaming(_reset)
def get_streaming_state(self) -> State:
"""Return the streaming state, including that of sub-modules."""
state: State = {}
def _add(name: str, module: StreamingModule):
if name:
name += "."
for key, value in module._streaming_state.items():
state[name + key] = value
self._apply_named_streaming(_add)
return state
def set_streaming_state(self, state: State):
"""Set the streaming state, including that of sub-modules."""
state = dict(state)
def _set(name: str, module: StreamingModule):
if name:
name += "."
module._streaming_state.clear()
for key, value in list(state.items()):
# complexity is not ideal here, but probably fine.
if key.startswith(name):
local_key = key[len(name):]
if '.' not in local_key:
module._streaming_state[local_key] = value
del state[key]
self._apply_named_streaming(_set)
assert len(state) == 0, list(state.keys())
def flush(self, x: tp.Optional[torch.Tensor] = None):
"""Flush any remaining outputs that were waiting for completion.
Typically, for convolutions, this will add the final padding
and process the last buffer.
This should take an optional argument `x`, which will be provided
if a module before this one in the streaming pipeline has already
spitted out a flushed out buffer.
"""
if x is None:
return None
else:
return self(x)
class StreamingSequential(StreamingModule, nn.Sequential):
"""A streaming compatible alternative of `nn.Sequential`.
"""
def flush(self, x: tp.Optional[torch.Tensor] = None):
for module in self:
if isinstance(module, StreamingModule):
x = module.flush(x)
elif x is not None:
x = module(x)
return x
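# Usage sketch (illustrative assumption, not part of the original module): a toy
# StreamingModule that tracks how many frames each batch item has seen; the state
# lives in `_streaming_state` and is cleared when the context manager exits.
if __name__ == "__main__":
    class _DemoFrameCounter(StreamingModule):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self._is_streaming:
                offset = self._streaming_state.get(
                    'offset', torch.zeros(x.shape[0], dtype=torch.long))
                self._streaming_state['offset'] = offset + x.shape[-1]
            return x
    demo_counter = _DemoFrameCounter()
    with demo_counter.streaming():
        demo_counter(torch.randn(2, 4))
        demo_counter(torch.randn(2, 3))
        print(demo_counter.get_streaming_state())  # {'offset': tensor([7, 7])}
    print(demo_counter.get_streaming_state())  # {} -- reset on exiting the context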
| audiocraft-main | audiocraft/modules/streaming.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from einops import rearrange
from librosa import filters
import torch
from torch import nn
import torch.nn.functional as F
import torchaudio
class ChromaExtractor(nn.Module):
"""Chroma extraction and quantization.
Args:
sample_rate (int): Sample rate for the chroma extraction.
n_chroma (int): Number of chroma bins for the chroma extraction.
radix2_exp (int): Size of stft window for the chroma extraction (power of 2, e.g. 12 -> 2^12).
nfft (int, optional): Number of FFT.
winlen (int, optional): Window length.
winhop (int, optional): Window hop size.
argmax (bool, optional): Whether to use argmax. Defaults to False.
norm (float, optional): Norm for chroma normalization. Defaults to inf.
"""
def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, nfft: tp.Optional[int] = None,
winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, argmax: bool = False,
norm: float = torch.inf):
super().__init__()
self.winlen = winlen or 2 ** radix2_exp
self.nfft = nfft or self.winlen
self.winhop = winhop or (self.winlen // 4)
self.sample_rate = sample_rate
self.n_chroma = n_chroma
self.norm = norm
self.argmax = argmax
self.register_buffer('fbanks', torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,
n_chroma=self.n_chroma)), persistent=False)
self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,
hop_length=self.winhop, power=2, center=True,
pad=0, normalized=True)
def forward(self, wav: torch.Tensor) -> torch.Tensor:
T = wav.shape[-1]
# in case we are getting a wav that was dropped out (nullified)
        # from the conditioner, make sure wav length is no less than nfft
if T < self.nfft:
pad = self.nfft - T
r = 0 if pad % 2 == 0 else 1
wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)
assert wav.shape[-1] == self.nfft, f"expected len {self.nfft} but got {wav.shape[-1]}"
spec = self.spec(wav).squeeze(1)
raw_chroma = torch.einsum('cf,...ft->...ct', self.fbanks, spec)
norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)
norm_chroma = rearrange(norm_chroma, 'b d t -> b t d')
if self.argmax:
idx = norm_chroma.argmax(-1, keepdim=True)
norm_chroma[:] = 0
norm_chroma.scatter_(dim=-1, index=idx, value=1)
return norm_chroma
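# Usage sketch (illustrative assumption, not part of the original module): extract a
# 12-bin chroma sequence from one second of random audio sampled at 32 kHz.
if __name__ == "__main__":
    demo_chroma = ChromaExtractor(sample_rate=32000, n_chroma=12, radix2_exp=12)
    demo_wav = torch.randn(1, 1, 32000)
    print(demo_chroma(demo_wav).shape)  # [1, n_frames, 12]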
| audiocraft-main | audiocraft/modules/chroma.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
import logging
import math
from pathlib import Path
import random
import re
import typing as tp
import warnings
import einops
from num2words import num2words
import spacy
from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from .chroma import ChromaExtractor
from .streaming import StreamingModule
from .transformer import create_sin_embedding
from ..data.audio import audio_read
from ..data.audio_dataset import SegmentInfo
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..quantization import ResidualVectorQuantizer
from ..utils.autocast import TorchAutocast
from ..utils.cache import EmbeddingCache
from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
logger = logging.getLogger(__name__)
TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist)
ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask
class WavCondition(tp.NamedTuple):
wav: torch.Tensor
length: torch.Tensor
sample_rate: tp.List[int]
path: tp.List[tp.Optional[str]] = []
seek_time: tp.List[tp.Optional[float]] = []
class JointEmbedCondition(tp.NamedTuple):
wav: torch.Tensor
text: tp.List[tp.Optional[str]]
length: torch.Tensor
sample_rate: tp.List[int]
path: tp.List[tp.Optional[str]] = []
seek_time: tp.List[tp.Optional[float]] = []
@dataclass
class ConditioningAttributes:
text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict)
wav: tp.Dict[str, WavCondition] = field(default_factory=dict)
joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict)
def __getitem__(self, item):
return getattr(self, item)
@property
def text_attributes(self):
return self.text.keys()
@property
def wav_attributes(self):
return self.wav.keys()
@property
def joint_embed_attributes(self):
return self.joint_embed.keys()
@property
def attributes(self):
return {
"text": self.text_attributes,
"wav": self.wav_attributes,
"joint_embed": self.joint_embed_attributes,
}
def to_flat_dict(self):
return {
**{f"text.{k}": v for k, v in self.text.items()},
**{f"wav.{k}": v for k, v in self.wav.items()},
**{f"joint_embed.{k}": v for k, v in self.joint_embed.items()}
}
@classmethod
def from_flat_dict(cls, x):
out = cls()
for k, v in x.items():
kind, att = k.split(".")
out[kind][att] = v
return out
class SegmentWithAttributes(SegmentInfo):
"""Base class for all dataclasses that are used for conditioning.
All child classes should implement `to_condition_attributes` that converts
the existing attributes to a dataclass of type ConditioningAttributes.
"""
def to_condition_attributes(self) -> ConditioningAttributes:
raise NotImplementedError()
def nullify_condition(condition: ConditionType, dim: int = 1):
"""Transform an input condition to a null condition.
    This is done by converting it to a single zero vector, similarly
    to how it is done inside WhiteSpaceTokenizer and NoopTokenizer.
Args:
condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor])
dim (int): The dimension that will be truncated (should be the time dimension)
WARNING!: dim should not be the batch dimension!
Returns:
ConditionType: A tuple of null condition and mask
"""
assert dim != 0, "dim cannot be the batch dimension!"
assert isinstance(condition, tuple) and \
isinstance(condition[0], torch.Tensor) and \
isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!"
cond, mask = condition
B = cond.shape[0]
last_dim = cond.dim() - 1
out = cond.transpose(dim, last_dim)
out = 0. * out[..., :1]
out = out.transpose(dim, last_dim)
mask = torch.zeros((B, 1), device=out.device).int()
assert cond.dim() == out.dim()
return out, mask
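# Illustrative sketch (an assumption, not part of the original module): nullifying a
# [B, T, D] condition along the time axis collapses it to a single zeroed step.
def _demo_nullify_condition():
    demo_cond = torch.randn(2, 5, 8)
    demo_mask = torch.ones(2, 5, dtype=torch.int)
    null_cond, null_mask = nullify_condition((demo_cond, demo_mask), dim=1)
    return null_cond.shape, null_mask.shape  # torch.Size([2, 1, 8]), torch.Size([2, 1])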
def nullify_wav(cond: WavCondition) -> WavCondition:
"""Transform a WavCondition to a nullified WavCondition.
It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes.
Args:
cond (WavCondition): Wav condition with wav, tensor of shape [B, T].
Returns:
WavCondition: Nullified wav condition.
"""
null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1)
return WavCondition(
wav=null_wav,
length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device),
sample_rate=cond.sample_rate,
path=[None] * cond.wav.shape[0],
seek_time=[None] * cond.wav.shape[0],
)
def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition:
"""Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0,
and replacing metadata by dummy attributes.
Args:
        embed (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T].
"""
null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1)
return JointEmbedCondition(
wav=null_wav, text=[None] * len(embed.text),
length=torch.LongTensor([0]).to(embed.wav.device),
sample_rate=embed.sample_rate,
path=[None] * embed.wav.shape[0],
seek_time=[0] * embed.wav.shape[0],
)
class Tokenizer:
"""Base tokenizer implementation
    (in case we want to introduce more advanced tokenizers in the future).
"""
def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError()
class WhiteSpaceTokenizer(Tokenizer):
"""This tokenizer should be used for natural language descriptions.
For example:
["he didn't, know he's going home.", 'shorter sentence'] =>
[[78, 62, 31, 4, 78, 25, 19, 34],
[59, 77, 0, 0, 0, 0, 0, 0]]
"""
PUNCTUATION = "?:!.,;"
def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm",
lemma: bool = True, stopwords: bool = True) -> None:
self.n_bins = n_bins
self.pad_idx = pad_idx
self.lemma = lemma
self.stopwords = stopwords
try:
self.nlp = spacy.load(language)
except IOError:
spacy.cli.download(language) # type: ignore
self.nlp = spacy.load(language)
@tp.no_type_check
def __call__(self, texts: tp.List[tp.Optional[str]],
return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]:
"""Take a list of strings and convert them to a tensor of indices.
Args:
texts (list[str]): List of strings.
return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False.
Returns:
tuple[torch.Tensor, torch.Tensor]:
- Indices of words in the LUT.
- And a mask indicating where the padding tokens are
"""
output, lengths = [], []
texts = deepcopy(texts)
for i, text in enumerate(texts):
# if current sample doesn't have a certain attribute, replace with pad token
if text is None:
output.append(torch.Tensor([self.pad_idx]))
lengths.append(0)
continue
# convert numbers to words
text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore
# normalize text
text = self.nlp(text) # type: ignore
# remove stopwords
if self.stopwords:
text = [w for w in text if not w.is_stop] # type: ignore
# remove punctuation
text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore
# lemmatize if needed
text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore
texts[i] = " ".join(text)
lengths.append(len(text))
# convert to tensor
tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text])
output.append(tokens)
mask = length_to_mask(torch.IntTensor(lengths)).int()
padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t()
if return_text:
return padded_output, mask, texts # type: ignore
return padded_output, mask
class NoopTokenizer(Tokenizer):
"""This tokenizer should be used for global conditioners such as: artist, genre, key, etc.
The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split
strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will
split it to ["Jeff", "Buckley"] and return an index per word.
For example:
["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101]
["Metal", "Rock", "Classical"] => [0, 223, 51]
"""
def __init__(self, n_bins: int, pad_idx: int = 0):
self.n_bins = n_bins
self.pad_idx = pad_idx
def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
output, lengths = [], []
for text in texts:
# if current sample doesn't have a certain attribute, replace with pad token
if text is None:
output.append(self.pad_idx)
lengths.append(0)
else:
output.append(hash_trick(text, self.n_bins))
lengths.append(1)
tokens = torch.LongTensor(output).unsqueeze(1)
mask = length_to_mask(torch.IntTensor(lengths)).int()
return tokens, mask
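# Illustrative sketch (an assumption, not part of the original module): hash a few
# global attributes into a fixed-size vocabulary; the missing (None) entry gets the
# pad index and a zeroed mask row.
def _demo_noop_tokenizer():
    demo_tokenizer = NoopTokenizer(n_bins=128)
    tokens, mask = demo_tokenizer(["Queen", "Jeff Buckley", None])
    return tokens.shape, mask  # tokens: torch.Size([3, 1]); mask[-1] is all zeros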
class BaseConditioner(nn.Module):
"""Base model for all conditioner modules.
We allow the output dim to be different than the hidden dim for two reasons:
1) keep our LUTs small when the vocab is large;
2) make all condition dims consistent.
Args:
dim (int): Hidden dim of the model.
output_dim (int): Output dim of the conditioner.
"""
def __init__(self, dim: int, output_dim: int):
super().__init__()
self.dim = dim
self.output_dim = output_dim
self.output_proj = nn.Linear(dim, output_dim)
def tokenize(self, *args, **kwargs) -> tp.Any:
"""Should be any part of the processing that will lead to a synchronization
point, e.g. BPE tokenization with transfer to the GPU.
        The returned value will be saved and returned later when calling forward().
"""
raise NotImplementedError()
def forward(self, inputs: tp.Any) -> ConditionType:
"""Gets input that should be used as conditioning (e.g, genre, description or a waveform).
Outputs a ConditionType, after the input data was embedded as a dense vector.
Returns:
ConditionType:
- A tensor of size [B, T, D] where B is the batch size, T is the length of the
output embedding and D is the dimension of the embedding.
                - And a mask indicating where the padding tokens are.
"""
raise NotImplementedError()
class TextConditioner(BaseConditioner):
...
class LUTConditioner(TextConditioner):
"""Lookup table TextConditioner.
Args:
n_bins (int): Number of bins.
dim (int): Hidden dim of the model (text-encoder/LUT).
output_dim (int): Output dim of the conditioner.
tokenizer (str): Name of the tokenizer.
pad_idx (int, optional): Index for padding token. Defaults to 0.
"""
def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0):
super().__init__(dim, output_dim)
self.embed = nn.Embedding(n_bins, dim)
self.tokenizer: Tokenizer
if tokenizer == 'whitespace':
self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx)
elif tokenizer == 'noop':
self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx)
else:
raise ValueError(f"unrecognized tokenizer `{tokenizer}`.")
def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
device = self.embed.weight.device
tokens, mask = self.tokenizer(x)
tokens, mask = tokens.to(device), mask.to(device)
return tokens, mask
def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType:
tokens, mask = inputs
embeds = self.embed(tokens)
embeds = self.output_proj(embeds)
embeds = (embeds * mask.unsqueeze(-1))
return embeds, mask
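# Illustrative sketch (an assumption, not part of the original module): embed a batch
# of free-form tags with the lookup-table conditioner and the 'noop' tokenizer.
def _demo_lut_conditioner():
    demo_conditioner = LUTConditioner(n_bins=64, dim=16, output_dim=32, tokenizer='noop')
    demo_inputs = demo_conditioner.tokenize(["rock", "jazz", None])
    embeds, mask = demo_conditioner(demo_inputs)
    return embeds.shape, mask.shape  # torch.Size([3, 1, 32]), torch.Size([3, 1])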
class T5Conditioner(TextConditioner):
"""T5-based TextConditioner.
Args:
name (str): Name of the T5 model.
output_dim (int): Output dim of the conditioner.
finetune (bool): Whether to fine-tune T5 at train time.
device (str): Device for T5 Conditioner.
autocast_dtype (tp.Optional[str], optional): Autocast dtype.
word_dropout (float, optional): Word dropout probability.
normalize_text (bool, optional): Whether to apply text normalization.
"""
MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b",
"google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large",
"google/flan-t5-xl", "google/flan-t5-xxl"]
MODELS_DIMS = {
"t5-small": 512,
"t5-base": 768,
"t5-large": 1024,
"t5-3b": 1024,
"t5-11b": 1024,
"google/flan-t5-small": 512,
"google/flan-t5-base": 768,
"google/flan-t5-large": 1024,
"google/flan-t5-3b": 1024,
"google/flan-t5-11b": 1024,
}
def __init__(self, name: str, output_dim: int, finetune: bool, device: str,
autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0.,
normalize_text: bool = False):
        assert name in self.MODELS, f"Unrecognized t5 model name (should be in {self.MODELS})"
super().__init__(self.MODELS_DIMS[name], output_dim)
self.device = device
self.name = name
self.finetune = finetune
self.word_dropout = word_dropout
if autocast_dtype is None or self.device == 'cpu':
self.autocast = TorchAutocast(enabled=False)
if self.device != 'cpu':
logger.warning("T5 has no autocast, this might lead to NaN")
else:
dtype = getattr(torch, autocast_dtype)
assert isinstance(dtype, torch.dtype)
logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}")
self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype)
# Let's disable logging temporarily because T5 will vomit some errors otherwise.
# thanks https://gist.github.com/simon-weber/7853144
previous_level = logging.root.manager.disable
logging.disable(logging.ERROR)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
self.t5_tokenizer = T5Tokenizer.from_pretrained(name)
t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune)
finally:
logging.disable(previous_level)
if finetune:
self.t5 = t5
else:
            # this makes sure that the t5 model is not part
            # of the saved checkpoint
self.__dict__['t5'] = t5.to(device)
self.normalize_text = normalize_text
if normalize_text:
self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True)
def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]:
# if current sample doesn't have a certain attribute, replace with empty string
entries: tp.List[str] = [xi if xi is not None else "" for xi in x]
if self.normalize_text:
_, _, entries = self.text_normalizer(entries, return_text=True)
if self.word_dropout > 0. and self.training:
new_entries = []
for entry in entries:
words = [word for word in entry.split(" ") if random.random() >= self.word_dropout]
new_entries.append(" ".join(words))
entries = new_entries
empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""])
inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device)
mask = inputs['attention_mask']
        mask[empty_idx, :] = 0  # zero-out indices where the input is non-existent
return inputs
def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType:
mask = inputs['attention_mask']
with torch.set_grad_enabled(self.finetune), self.autocast:
embeds = self.t5(**inputs).last_hidden_state
embeds = self.output_proj(embeds.to(self.output_proj.weight))
embeds = (embeds * mask.unsqueeze(-1))
return embeds, mask
class WaveformConditioner(BaseConditioner):
"""Base class for all conditioners that take a waveform as input.
Classes that inherit must implement `_get_wav_embedding` that outputs
a continuous tensor, and `_downsampling_factor` that returns the down-sampling
factor of the embedding model.
Args:
dim (int): The internal representation dimension.
output_dim (int): Output dimension.
device (tp.Union[torch.device, str]): Device.
"""
def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]):
super().__init__(dim, output_dim)
self.device = device
def tokenize(self, x: WavCondition) -> WavCondition:
wav, length, sample_rate, path, seek_time = x
assert length is not None
return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time)
def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor:
"""Gets as input a WavCondition and returns a dense embedding."""
raise NotImplementedError()
def _downsampling_factor(self):
"""Returns the downsampling factor of the embedding model."""
raise NotImplementedError()
def forward(self, x: WavCondition) -> ConditionType:
"""Extract condition embedding and mask from a waveform and its metadata.
Args:
x (WavCondition): Waveform condition containing raw waveform and metadata.
Returns:
ConditionType: a dense vector representing the conditioning along with its mask
"""
wav, lengths, *_ = x
with torch.no_grad():
embeds = self._get_wav_embedding(x)
embeds = embeds.to(self.output_proj.weight)
embeds = self.output_proj(embeds)
if lengths is not None:
lengths = lengths / self._downsampling_factor()
mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore
else:
mask = torch.ones_like(embeds)
embeds = (embeds * mask.unsqueeze(2).to(self.device))
return embeds, mask
class ChromaStemConditioner(WaveformConditioner):
"""Chroma conditioner based on stems.
    The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as
    drums and bass often dominate the chroma, leaving chroma features that carry
    little information about the melody.
Args:
output_dim (int): Output dimension for the conditioner.
sample_rate (int): Sample rate for the chroma extractor.
n_chroma (int): Number of chroma bins for the chroma extractor.
radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12).
        duration (float): Duration used during training. This is later used for correct padding
            in case we are using chroma as a prefix.
        match_len_on_eval (bool, optional): If True, all chromas are padded to the training
            duration. Defaults to True.
        eval_wavs (str, optional): Path to a dataset manifest with waveforms; these waveforms are
            used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps).
Defaults to None.
n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0.
device (tp.Union[torch.device, str], optional): Device for the conditioner.
**kwargs: Additional parameters for the chroma extractor.
"""
def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int,
duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None,
n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None,
device: tp.Union[torch.device, str] = 'cpu', **kwargs):
from demucs import pretrained
super().__init__(dim=n_chroma, output_dim=output_dim, device=device)
self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32)
self.sample_rate = sample_rate
self.match_len_on_eval = match_len_on_eval
self.duration = duration
self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device)
stem_sources: list = self.demucs.sources # type: ignore
self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device)
self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma,
radix2_exp=radix2_exp, **kwargs).to(device)
self.chroma_len = self._get_chroma_len()
self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs)
self.cache = None
if cache_path is not None:
self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device,
compute_embed_fn=self._get_full_chroma_for_cache,
extract_embed_fn=self._extract_chroma_chunk)
def _downsampling_factor(self) -> int:
return self.chroma.winhop
def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]:
"""Load pre-defined waveforms from a json.
These waveforms will be used for chroma extraction during evaluation.
This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps).
"""
if path is None:
return None
logger.info(f"Loading evaluation wavs from {path}")
from audiocraft.data.audio_dataset import AudioDataset
dataset: AudioDataset = AudioDataset.from_meta(
path, segment_duration=self.duration, min_audio_duration=self.duration,
sample_rate=self.sample_rate, channels=1)
if len(dataset) > 0:
eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device)
logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner")
return eval_wavs
else:
raise ValueError("Could not find evaluation wavs, check lengths of wavs")
def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None:
self.eval_wavs = eval_wavs
def has_eval_wavs(self) -> bool:
return self.eval_wavs is not None
def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor:
"""Sample wavs from a predefined list."""
assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided."
total_eval_wavs = len(self.eval_wavs)
out = self.eval_wavs
if num_samples > total_eval_wavs:
out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1)
return out[torch.randperm(len(out))][:num_samples]
def _get_chroma_len(self) -> int:
"""Get length of chroma during training."""
dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device)
dummy_chr = self.chroma(dummy_wav)
return dummy_chr.shape[1]
@torch.no_grad()
def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:
"""Get parts of the wav that holds the melody, extracting the main stems from the wav."""
from demucs.apply import apply_model
from demucs.audio import convert_audio
with self.autocast:
wav = convert_audio(
wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore
stems = apply_model(self.demucs, wav, device=self.device)
stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning
mix_wav = stems.sum(1) # merge extracted stems to single waveform
mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore
return mix_wav
@torch.no_grad()
def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor:
"""Extract chroma features from the waveform."""
with self.autocast:
return self.chroma(wav)
@torch.no_grad()
def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:
"""Compute wav embedding, applying stem and chroma extraction."""
# avoid 0-size tensors when we are working with null conds
if wav.shape[-1] == 1:
return self._extract_chroma(wav)
stems = self._get_stemmed_wav(wav, sample_rate)
chroma = self._extract_chroma(stems)
return chroma
@torch.no_grad()
def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor:
"""Extract chroma from the whole audio waveform at the given path."""
wav, sr = audio_read(path)
wav = wav[None].to(self.device)
wav = convert_audio(wav, sr, self.sample_rate, to_channels=1)
chroma = self._compute_wav_embedding(wav, self.sample_rate)[0]
return chroma
def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor:
"""Extract a chunk of chroma from the full chroma derived from the full waveform."""
wav_length = x.wav.shape[-1]
seek_time = x.seek_time[idx]
assert seek_time is not None, (
"WavCondition seek_time is required "
"when extracting chroma chunks from pre-computed chroma.")
full_chroma = full_chroma.float()
frame_rate = self.sample_rate / self._downsampling_factor()
target_length = int(frame_rate * wav_length / self.sample_rate)
index = int(frame_rate * seek_time)
out = full_chroma[index: index + target_length]
out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0]
return out.to(self.device)
@torch.no_grad()
def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor:
"""Get the wav embedding from the WavCondition.
The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly
or will rely on the embedding cache to load the pre-computed embedding if relevant.
"""
sampled_wav: tp.Optional[torch.Tensor] = None
if not self.training and self.eval_wavs is not None:
warn_once(logger, "Using precomputed evaluation wavs!")
sampled_wav = self._sample_eval_wavs(len(x.wav))
no_undefined_paths = all(p is not None for p in x.path)
no_nullified_cond = x.wav.shape[-1] > 1
if sampled_wav is not None:
chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate)
elif self.cache is not None and no_undefined_paths and no_nullified_cond:
paths = [Path(p) for p in x.path if p is not None]
chroma = self.cache.get_embed_from_cache(paths, x)
else:
assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal."
chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0])
if self.match_len_on_eval:
B, T, C = chroma.shape
if T > self.chroma_len:
chroma = chroma[:, :self.chroma_len]
logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})")
elif T < self.chroma_len:
n_repeat = int(math.ceil(self.chroma_len / T))
chroma = chroma.repeat(1, n_repeat, 1)
chroma = chroma[:, :self.chroma_len]
logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})")
return chroma
def tokenize(self, x: WavCondition) -> WavCondition:
"""Apply WavConditioner tokenization and populate cache if needed."""
x = super().tokenize(x)
no_undefined_paths = all(p is not None for p in x.path)
if self.cache is not None and no_undefined_paths:
paths = [Path(p) for p in x.path if p is not None]
self.cache.populate_embed_cache(paths, x)
return x
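# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal end-to-end use of the chroma-stem conditioner on a silent placeholder
# waveform. Running it requires the `demucs` dependency and downloads the pretrained
# `htdemucs` model on first use; every hyper-parameter value below is only an example.
def _example_chroma_stem_conditioner():
    sample_rate, duration = 32_000, 2.0
    conditioner = ChromaStemConditioner(
        output_dim=512, sample_rate=sample_rate, n_chroma=12, radix2_exp=12,
        duration=duration, device='cpu')
    wav = torch.zeros(1, 1, int(sample_rate * duration))  # [B, C, T] placeholder waveform
    condition = WavCondition(
        wav=wav, length=torch.tensor([wav.shape[-1]]),
        sample_rate=[sample_rate], path=[None], seek_time=[None])
    embeds, mask = conditioner(conditioner.tokenize(condition))
    return embeds.shape, mask.shape  # [1, T_chroma, 512] and [1, T_chroma]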
class JointEmbeddingConditioner(BaseConditioner):
"""Joint embedding conditioning supporting both audio or text conditioning.
Args:
dim (int): Dimension.
output_dim (int): Output dimension.
device (str): Device.
attribute (str): Attribute used by the conditioner.
autocast_dtype (str): Autocast for the conditioner.
quantize (bool): Whether to quantize the CLAP embedding.
n_q (int): Number of residual quantizers (used if quantize is true).
bins (int): Quantizers' codebooks size (used if quantize is true).
kwargs: Additional parameters for residual vector quantizer.
"""
def __init__(self, dim: int, output_dim: int, device: str, attribute: str,
autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True,
n_q: int = 12, bins: int = 1024, **kwargs):
super().__init__(dim=dim, output_dim=output_dim)
self.device = device
self.attribute = attribute
if autocast_dtype is None or device == 'cpu':
self.autocast = TorchAutocast(enabled=False)
logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.")
else:
dtype = getattr(torch, autocast_dtype)
assert isinstance(dtype, torch.dtype)
logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.")
self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype)
# residual vector quantizer to discretize the conditioned embedding
self.quantizer: tp.Optional[ResidualVectorQuantizer] = None
if quantize:
self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs)
def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]:
"""Get joint embedding in latent space from the inputs.
Returns:
tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding
and corresponding empty indexes.
"""
raise NotImplementedError()
def forward(self, x: JointEmbedCondition) -> ConditionType:
with self.autocast:
embed, empty_idx = self._get_embed(x)
if self.quantizer is not None:
embed = embed.view(-1, self.dim, 1)
q_res = self.quantizer(embed, frame_rate=1)
out_embed = q_res.x.view(-1, self.dim)
else:
out_embed = embed
out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim)
mask = torch.ones(*out_embed.shape[:2], device=out_embed.device)
            mask[empty_idx, :] = 0  # zero-out indexes where the input is non-existent
out_embed = (out_embed * mask.unsqueeze(-1))
return out_embed, mask
def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition:
return x
class CLAPEmbeddingConditioner(JointEmbeddingConditioner):
"""Joint Embedding conditioner based on pre-trained CLAP model.
This CLAP-based conditioner supports a caching mechanism
over the computed embeddings for faster training.
Args:
dim (int): Dimension.
output_dim (int): Output dimension.
device (str): Device.
attribute (str): Attribute used by the conditioner.
quantize (bool): Whether to quantize the CLAP embedding.
n_q (int): Number of residual quantizers (used if quantize is true).
bins (int): Quantizers' codebooks size (used if quantize is true).
checkpoint (str): Path to CLAP checkpoint.
model_arch (str): CLAP model architecture.
enable_fusion (bool): Enable fusion for CLAP model.
sample_rate (int): Sample rate used by CLAP model.
max_audio_length (float): Maximum audio length for CLAP model.
audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence.
normalize (bool): Whether to normalize the CLAP embedding.
text_p (float): Probability of using text representation instead of audio at train time.
batch_size (Optional[int]): Batch size for CLAP embedding computation.
autocast_dtype (str): Autocast for the conditioner.
cache_path (Optional[str]): Path for pre-computed embeddings caching.
kwargs: Additional parameters for residual vector quantizer.
"""
def __init__(self, dim: int, output_dim: int, device: str, attribute: str,
quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str,
enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int,
                 normalize: bool, text_p: float, batch_size: tp.Optional[int] = None,
autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs):
try:
import laion_clap # type: ignore
except ImportError:
raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'")
checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint)
clap_tokenize = RobertaTokenizer.from_pretrained('roberta-base')
clap_model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch)
load_clap_state_dict(clap_model, checkpoint)
clap_model.eval()
clap_model.to(device)
super().__init__(dim=dim, output_dim=output_dim, device=device, attribute=attribute,
autocast_dtype=autocast_dtype, quantize=quantize, n_q=n_q, bins=bins,
**kwargs)
self.checkpoint = checkpoint
self.enable_fusion = enable_fusion
self.model_arch = model_arch
self.clap: laion_clap.CLAP_Module
self.clap_tokenize: RobertaTokenizer
self.clap_sample_rate = sample_rate
self.clap_max_frames = int(self.clap_sample_rate * max_audio_length)
self.clap_stride = int(self.clap_sample_rate * audio_stride)
self.batch_size = batch_size or 1
self.normalize = normalize
self.text_p = text_p
self.__dict__['clap_tokenize'] = clap_tokenize
self.__dict__['clap'] = clap_model
self.wav_cache, self.text_cache = None, None
if cache_path is not None:
self.wav_cache = EmbeddingCache(Path(cache_path) / 'wav', self.device,
compute_embed_fn=self._get_wav_embedding_for_cache,
extract_embed_fn=self._extract_wav_embedding_chunk)
self.text_cache = EmbeddingCache(Path(cache_path) / 'text', self.device,
compute_embed_fn=self._get_text_embedding_for_cache)
def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict:
# we use the default params from CLAP module here as well
return self.clap_tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt")
def _compute_text_embedding(self, text: tp.List[str]) -> torch.Tensor:
"""Compute text embedding from CLAP model on a given a batch of text.
Args:
text (list[str]): List of text for the batch, with B items.
Returns:
torch.Tensor: CLAP embedding derived from text, of shape [B, 1, D], with D the CLAP embedding dimension.
"""
with torch.no_grad():
embed = self.clap.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True)
return embed.view(embed.size(0), 1, embed.size(-1))
def _get_text_embedding_for_cache(self, path: tp.Union[Path, str],
x: JointEmbedCondition, idx: int) -> torch.Tensor:
"""Get text embedding function for the cache."""
text = x.text[idx]
text = text if text is not None else ""
return self._compute_text_embedding([text])[0]
def _preprocess_wav(self, wav: torch.Tensor, length: torch.Tensor, sample_rates: tp.List[int]) -> torch.Tensor:
"""Preprocess wav to expected format by CLAP model.
Args:
wav (torch.Tensor): Audio wav, of shape [B, C, T].
length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B].
sample_rates (list[int]): Sample rates for each sample in the batch
Returns:
torch.Tensor: Audio wav of shape [B, T].
"""
assert wav.dim() == 3, "Expecting wav to be [B, C, T]"
if sample_rates is not None:
_wav = []
for i, audio in enumerate(wav):
sr = sample_rates[i]
audio = convert_audio(audio, from_rate=sr, to_rate=self.clap_sample_rate, to_channels=1)
_wav.append(audio)
wav = torch.stack(_wav, dim=0)
wav = wav.mean(dim=1)
return wav
def _compute_wav_embedding(self, wav: torch.Tensor, length: torch.Tensor,
sample_rates: tp.List[int], reduce_mean: bool = False) -> torch.Tensor:
"""Compute audio wave embedding from CLAP model.
        Since CLAP operates on fixed-length audio inputs and we need to process longer audio sequences,
        we compute the wav embeddings on `clap_max_frames` windows with a stride of `clap_stride` samples
        and average the resulting embeddings.
Args:
wav (torch.Tensor): Audio wav, of shape [B, C, T].
length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B].
sample_rates (list[int]): Sample rates for each sample in the batch.
reduce_mean (bool): Whether to get the average tensor.
Returns:
torch.Tensor: Audio embedding of shape [B, F, D], F being the number of chunks, D the dimension.
"""
with torch.no_grad():
wav = self._preprocess_wav(wav, length, sample_rates)
B, T = wav.shape
if T >= self.clap_max_frames:
wav = wav.unfold(-1, self.clap_max_frames, self.clap_stride) # [B, F, T]
else:
wav = wav.view(-1, 1, T) # [B, F, T] with F=1
wav = einops.rearrange(wav, 'b f t -> (b f) t')
embed_list = []
for i in range(0, wav.size(0), self.batch_size):
_wav = wav[i:i+self.batch_size, ...]
_embed = self.clap.get_audio_embedding_from_data(_wav, use_tensor=True)
embed_list.append(_embed)
embed = torch.cat(embed_list, dim=0)
embed = einops.rearrange(embed, '(b f) d -> b f d', b=B)
if reduce_mean:
embed = embed.mean(dim=1, keepdim=True)
return embed # [B, F, D] with F=1 if reduce_mean is True
def _get_wav_embedding_for_cache(self, path: tp.Union[str, Path],
x: JointEmbedCondition, idx: int) -> torch.Tensor:
"""Compute audio wave embedding for the cache.
The embedding is computed on a given audio read from file.
Args:
path (str or Path): Path to the full audio file.
Returns:
torch.Tensor: Single-item tensor of shape [F, D], F being the number of chunks, D the dimension.
"""
wav, sr = audio_read(path) # [C, T]
wav = wav.unsqueeze(0).to(self.device) # [1, C, T]
wav_len = torch.LongTensor([wav.shape[-1]]).to(self.device)
embed = self._compute_wav_embedding(wav, wav_len, [sr], reduce_mean=False) # [B, F, D]
return embed.squeeze(0) # [F, D]
def _extract_wav_embedding_chunk(self, full_embed: torch.Tensor, x: JointEmbedCondition, idx: int) -> torch.Tensor:
"""Extract the chunk of embedding matching the seek_time and length from the full CLAP audio embedding.
Args:
full_embed (torch.Tensor): CLAP embedding computed on the full wave, of shape [F, D].
x (JointEmbedCondition): Joint embedding condition for the full batch.
idx (int): Index considered for the given embedding to extract.
Returns:
torch.Tensor: Wav embedding averaged on sliding window, of shape [1, D].
"""
sample_rate = x.sample_rate[idx]
seek_time = x.seek_time[idx]
seek_time = 0. if seek_time is None else seek_time
clap_stride = int(self.clap_stride / self.clap_sample_rate) * sample_rate
end_seek_time = seek_time + self.clap_max_frames / self.clap_sample_rate
start_offset = int(seek_time * sample_rate // clap_stride)
end_offset = int(end_seek_time * sample_rate // clap_stride)
wav_embed = full_embed[start_offset:end_offset, ...]
wav_embed = wav_embed.mean(dim=0, keepdim=True)
return wav_embed.to(self.device) # [F, D]
def _get_text_embedding(self, x: JointEmbedCondition) -> torch.Tensor:
"""Get CLAP embedding from a batch of text descriptions."""
no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout
if self.text_cache is not None and no_nullified_cond:
assert all(p is not None for p in x.path), "Cache requires all JointEmbedCondition paths to be provided"
paths = [Path(p) for p in x.path if p is not None]
embed = self.text_cache.get_embed_from_cache(paths, x)
else:
text = [xi if xi is not None else "" for xi in x.text]
embed = self._compute_text_embedding(text)
if self.normalize:
embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1)
return embed
def _get_wav_embedding(self, x: JointEmbedCondition) -> torch.Tensor:
"""Get CLAP embedding from a batch of audio tensors (and corresponding sample rates)."""
no_undefined_paths = all(p is not None for p in x.path)
no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout
if self.wav_cache is not None and no_undefined_paths and no_nullified_cond:
paths = [Path(p) for p in x.path if p is not None]
embed = self.wav_cache.get_embed_from_cache(paths, x)
else:
embed = self._compute_wav_embedding(x.wav, x.length, x.sample_rate, reduce_mean=True)
if self.normalize:
embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1)
return embed
def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition:
# Trying to limit as much as possible sync points when the cache is warm.
no_undefined_paths = all(p is not None for p in x.path)
if self.wav_cache is not None and no_undefined_paths:
assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided"
paths = [Path(p) for p in x.path if p is not None]
self.wav_cache.populate_embed_cache(paths, x)
if self.text_cache is not None and no_undefined_paths:
assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided"
paths = [Path(p) for p in x.path if p is not None]
self.text_cache.populate_embed_cache(paths, x)
return x
def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]:
"""Extract shared latent representation from either the wav or the text using CLAP."""
# decide whether to use text embedding at train time or not
use_text_embed = random.random() < self.text_p
if self.training and not use_text_embed:
embed = self._get_wav_embedding(x)
empty_idx = torch.LongTensor([]) # we assume we always have the audio wav
else:
embed = self._get_text_embedding(x)
empty_idx = torch.LongTensor([i for i, xi in enumerate(x.text) if xi is None or xi == ""])
return embed, empty_idx
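# --- Editor's illustrative sketch (not part of the original module) ---
# The sliding-window arithmetic used by `_compute_wav_embedding`, reproduced on a
# dummy mono waveform without loading CLAP. The 10 s window and 5 s stride values
# are arbitrary examples, not CLAP defaults.
def _example_clap_windowing():
    sample_rate, max_audio_length, audio_stride = 48_000, 10, 5
    clap_max_frames = int(sample_rate * max_audio_length)    # samples per window
    clap_stride = int(sample_rate * audio_stride)             # samples between window starts
    wav = torch.zeros(2, 30 * sample_rate)                    # [B, T], 30 seconds of audio
    windows = wav.unfold(-1, clap_max_frames, clap_stride)    # [B, F, T_window]
    return windows.shape                                       # torch.Size([2, 5, 480000])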
def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str) -> ConditioningAttributes:
"""Utility function for nullifying an attribute inside an ConditioningAttributes object.
If the condition is of type "wav", then nullify it using `nullify_condition` function.
If the condition is of any other type, set its value to None.
Works in-place.
"""
if condition_type not in ['text', 'wav', 'joint_embed']:
raise ValueError(
"dropout_condition got an unexpected condition type!"
f" expected 'text', 'wav' or 'joint_embed' but got '{condition_type}'"
)
if condition not in getattr(sample, condition_type):
raise ValueError(
"dropout_condition received an unexpected condition!"
f" expected wav={sample.wav.keys()} and text={sample.text.keys()}"
f" but got '{condition}' of type '{condition_type}'!"
)
if condition_type == 'wav':
wav_cond = sample.wav[condition]
sample.wav[condition] = nullify_wav(wav_cond)
elif condition_type == 'joint_embed':
embed = sample.joint_embed[condition]
sample.joint_embed[condition] = nullify_joint_embed(embed)
else:
sample.text[condition] = None
return sample
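# --- Editor's illustrative sketch (not part of the original module) ---
# Nullifying a single text attribute in place; the attribute name "genre" is only
# an example value.
def _example_dropout_condition():
    attrs = ConditioningAttributes(text={'genre': 'Jazz'})
    dropout_condition(attrs, 'text', 'genre')
    assert attrs.text['genre'] is None
    return attrs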
class DropoutModule(nn.Module):
"""Base module for all dropout modules."""
def __init__(self, seed: int = 1234):
super().__init__()
self.rng = torch.Generator()
self.rng.manual_seed(seed)
class AttributeDropout(DropoutModule):
"""Dropout with a given probability per attribute.
This is different from the behavior of ClassifierFreeGuidanceDropout as this allows for attributes
to be dropped out separately. For example, "artist" can be dropped while "genre" remains.
This is in contrast to ClassifierFreeGuidanceDropout where if "artist" is dropped "genre"
must also be dropped.
Args:
        p (tp.Dict[str, tp.Dict[str, float]]): A dict mapping condition types to dicts of per-attribute
            dropout probabilities. For example:
            {
                "text": {"genre": 0.1, "artist": 0.5},
                "wav": {"self_wav": 0.25},
            }
        active_on_eval (bool, optional): Whether the dropout is active at eval. Defaults to False.
seed (int, optional): Random seed.
"""
def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234):
super().__init__(seed=seed)
self.active_on_eval = active_on_eval
# construct dict that return the values from p otherwise 0
self.p = {}
for condition_type, probs in p.items():
self.p[condition_type] = defaultdict(lambda: 0, probs)
def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
"""
Args:
samples (list[ConditioningAttributes]): List of conditions.
Returns:
list[ConditioningAttributes]: List of conditions after certain attributes were set to None.
"""
if not self.training and not self.active_on_eval:
return samples
samples = deepcopy(samples)
for condition_type, ps in self.p.items(): # for condition types [text, wav]
for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre])
if torch.rand(1, generator=self.rng).item() < p:
for sample in samples:
dropout_condition(sample, condition_type, condition)
return samples
def __repr__(self):
return f"AttributeDropout({dict(self.p)})"
class ClassifierFreeGuidanceDropout(DropoutModule):
"""Classifier Free Guidance dropout.
All attributes are dropped with the same probability.
Args:
p (float): Probability to apply condition dropout during training.
seed (int): Random seed.
"""
def __init__(self, p: float, seed: int = 1234):
super().__init__(seed=seed)
self.p = p
def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
"""
Args:
samples (list[ConditioningAttributes]): List of conditions.
Returns:
list[ConditioningAttributes]: List of conditions after all attributes were set to None.
"""
if not self.training:
return samples
# decide on which attributes to drop in a batched fashion
drop = torch.rand(1, generator=self.rng).item() < self.p
if not drop:
return samples
# nullify conditions of all attributes
samples = deepcopy(samples)
for condition_type in ["wav", "text"]:
for sample in samples:
for condition in sample.attributes[condition_type]:
dropout_condition(sample, condition_type, condition)
return samples
def __repr__(self):
return f"ClassifierFreeGuidanceDropout(p={self.p})"
class ConditioningProvider(nn.Module):
"""Prepare and provide conditions given all the supported conditioners.
Args:
conditioners (dict): Dictionary of conditioners.
device (torch.device or str, optional): Device for conditioners and output condition types.
"""
def __init__(self, conditioners: tp.Dict[str, BaseConditioner], device: tp.Union[torch.device, str] = "cpu"):
super().__init__()
self.device = device
self.conditioners = nn.ModuleDict(conditioners)
@property
def joint_embed_conditions(self):
return [m.attribute for m in self.conditioners.values() if isinstance(m, JointEmbeddingConditioner)]
@property
def has_joint_embed_conditions(self):
return len(self.joint_embed_conditions) > 0
@property
def text_conditions(self):
return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)]
@property
def wav_conditions(self):
return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)]
@property
def has_wav_condition(self):
return len(self.wav_conditions) > 0
def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]:
"""Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly.
This should be called before starting any real GPU work to avoid synchronization points.
This will return a dict matching conditioner names to their arbitrary tokenized representations.
Args:
inputs (list[ConditioningAttributes]): List of ConditioningAttributes objects containing
text and wav conditions.
"""
        assert all([isinstance(x, ConditioningAttributes) for x in inputs]), (
            "Got unexpected input types for conditioner! should be tp.List[ConditioningAttributes]"
            f" but types were {set([type(x) for x in inputs])}"
        )
output = {}
text = self._collate_text(inputs)
wavs = self._collate_wavs(inputs)
joint_embeds = self._collate_joint_embeds(inputs)
        assert set(text.keys() | wavs.keys() | joint_embeds.keys()).issubset(set(self.conditioners.keys())), (
            f"Got an unexpected attribute! Expected {self.conditioners.keys()}, "
            f"got {text.keys(), wavs.keys(), joint_embeds.keys()}"
        )
for attribute, batch in chain(text.items(), wavs.items(), joint_embeds.items()):
output[attribute] = self.conditioners[attribute].tokenize(batch)
return output
def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]:
"""Compute pairs of `(embedding, mask)` using the configured conditioners and the tokenized representations.
The output is for example:
{
"genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])),
"description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])),
...
}
Args:
tokenized (dict): Dict of tokenized representations as returned by `tokenize()`.
"""
output = {}
for attribute, inputs in tokenized.items():
condition, mask = self.conditioners[attribute](inputs)
output[attribute] = (condition, mask)
return output
def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]:
"""Given a list of ConditioningAttributes objects, compile a dictionary where the keys
are the attributes and the values are the aggregated input per attribute.
For example:
Input:
[
ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...),
ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...),
]
Output:
{
"genre": ["Rock", "Hip-hop"],
"description": ["A rock song with a guitar solo", "A hip-hop verse"]
}
Args:
samples (list of ConditioningAttributes): List of ConditioningAttributes samples.
Returns:
dict[str, list[str, optional]]: A dictionary mapping an attribute name to text batch.
"""
out: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list)
texts = [x.text for x in samples]
for text in texts:
for condition in self.text_conditions:
out[condition].append(text[condition])
return out
def _collate_wavs(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, WavCondition]:
"""Generate a dict where the keys are attributes by which we fetch similar wavs,
and the values are Tensors of wavs according to said attributes.
*Note*: by the time the samples reach this function, each sample should have some waveform
inside the "wav" attribute. It should be either:
1. A real waveform
2. A null waveform due to the sample having no similar waveforms (nullified by the dataset)
3. A null waveform due to it being dropped in a dropout module (nullified by dropout)
Args:
samples (list of ConditioningAttributes): List of ConditioningAttributes samples.
Returns:
dict[str, WavCondition]: A dictionary mapping an attribute name to wavs.
"""
wavs = defaultdict(list)
lengths = defaultdict(list)
sample_rates = defaultdict(list)
paths = defaultdict(list)
seek_times = defaultdict(list)
out: tp.Dict[str, WavCondition] = {}
for sample in samples:
for attribute in self.wav_conditions:
wav, length, sample_rate, path, seek_time = sample.wav[attribute]
assert wav.dim() == 3, f"Got wav with dim={wav.dim()}, but expected 3 [1, C, T]"
assert wav.size(0) == 1, f"Got wav [B, C, T] with shape={wav.shape}, but expected B == 1"
# mono-channel conditioning
wav = wav.mean(1, keepdim=True) # [1, 1, T]
wavs[attribute].append(wav.flatten()) # [T]
lengths[attribute].append(length)
sample_rates[attribute].extend(sample_rate)
paths[attribute].extend(path)
seek_times[attribute].extend(seek_time)
# stack all wavs to a single tensor
for attribute in self.wav_conditions:
stacked_wav, _ = collate(wavs[attribute], dim=0)
out[attribute] = WavCondition(
stacked_wav.unsqueeze(1), torch.cat(lengths[attribute]), sample_rates[attribute],
paths[attribute], seek_times[attribute])
return out
def _collate_joint_embeds(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, JointEmbedCondition]:
"""Generate a dict where the keys are attributes by which we compute joint embeddings,
and the values are Tensors of pre-computed embeddings and the corresponding text attributes.
Args:
samples (list[ConditioningAttributes]): List of ConditioningAttributes samples.
Returns:
A dictionary mapping an attribute name to joint embeddings.
"""
texts = defaultdict(list)
wavs = defaultdict(list)
lengths = defaultdict(list)
sample_rates = defaultdict(list)
paths = defaultdict(list)
seek_times = defaultdict(list)
channels: int = 0
out = {}
for sample in samples:
for attribute in self.joint_embed_conditions:
wav, text, length, sample_rate, path, seek_time = sample.joint_embed[attribute]
assert wav.dim() == 3
if channels == 0:
channels = wav.size(1)
else:
assert channels == wav.size(1), "not all audio has same number of channels in batch"
assert wav.size(0) == 1, "Expecting single-wav batch in the collate method"
wav = einops.rearrange(wav, "b c t -> (b c t)") # [1, C, T] => [C * T]
wavs[attribute].append(wav)
texts[attribute].extend(text)
lengths[attribute].append(length)
sample_rates[attribute].extend(sample_rate)
paths[attribute].extend(path)
seek_times[attribute].extend(seek_time)
for attribute in self.joint_embed_conditions:
stacked_texts = texts[attribute]
stacked_paths = paths[attribute]
stacked_seek_times = seek_times[attribute]
stacked_wavs = pad_sequence(wavs[attribute]).to(self.device)
stacked_wavs = einops.rearrange(stacked_wavs, "(c t) b -> b c t", c=channels)
stacked_sample_rates = sample_rates[attribute]
stacked_lengths = torch.cat(lengths[attribute]).to(self.device)
assert stacked_lengths.size(0) == stacked_wavs.size(0)
assert len(stacked_sample_rates) == stacked_wavs.size(0)
assert len(stacked_texts) == stacked_wavs.size(0)
out[attribute] = JointEmbedCondition(
text=stacked_texts, wav=stacked_wavs,
length=stacked_lengths, sample_rate=stacked_sample_rates,
path=stacked_paths, seek_time=stacked_seek_times)
return out
class ConditionFuser(StreamingModule):
"""Condition fuser handles the logic to combine the different conditions
to the actual model input.
Args:
fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse
each condition. For example:
{
"prepend": ["description"],
"sum": ["genre", "bpm"],
"cross": ["description"],
}
cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention.
cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used.
"""
FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"]
def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False,
cross_attention_pos_emb_scale: float = 1.0):
super().__init__()
assert all(
[k in self.FUSING_METHODS for k in fuse2cond.keys()]
), f"Got invalid fuse method, allowed methods: {self.FUSING_METHODS}"
self.cross_attention_pos_emb = cross_attention_pos_emb
self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale
self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond
self.cond2fuse: tp.Dict[str, str] = {}
for fuse_method, conditions in fuse2cond.items():
for condition in conditions:
self.cond2fuse[condition] = fuse_method
def forward(
self,
input: torch.Tensor,
conditions: tp.Dict[str, ConditionType]
) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
"""Fuse the conditions to the provided model input.
Args:
input (torch.Tensor): Transformer input.
conditions (dict[str, ConditionType]): Dict of conditions.
Returns:
tuple[torch.Tensor, torch.Tensor]: The first tensor is the transformer input
after the conditions have been fused. The second output tensor is the tensor
used for cross-attention or None if no cross attention inputs exist.
"""
B, T, _ = input.shape
if 'offsets' in self._streaming_state:
first_step = False
offsets = self._streaming_state['offsets']
else:
first_step = True
offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device)
assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \
f"given conditions contain unknown attributes for fuser, " \
f"expected {self.cond2fuse.keys()}, got {conditions.keys()}"
cross_attention_output = None
for cond_type, (cond, cond_mask) in conditions.items():
op = self.cond2fuse[cond_type]
if op == 'sum':
input += cond
elif op == 'input_interpolate':
cond = einops.rearrange(cond, "b t d -> b d t")
cond = F.interpolate(cond, size=input.shape[1])
input += einops.rearrange(cond, "b d t -> b t d")
elif op == 'prepend':
if first_step:
input = torch.cat([cond, input], dim=1)
elif op == 'cross':
if cross_attention_output is not None:
cross_attention_output = torch.cat([cross_attention_output, cond], dim=1)
else:
cross_attention_output = cond
else:
raise ValueError(f"unknown op ({op})")
if self.cross_attention_pos_emb and cross_attention_output is not None:
positions = torch.arange(
cross_attention_output.shape[1],
device=cross_attention_output.device
).view(1, -1, 1)
pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1])
cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb
if self._is_streaming:
self._streaming_state['offsets'] = offsets + T
return input, cross_attention_output
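# --- Editor's illustrative sketch (not part of the original module) ---
# Fusing a cross-attention condition and a prepended condition with random tensors.
# The attribute names ("description", "self_wav") and all shapes are assumptions
# made for this demo only.
def _example_condition_fuser():
    fuser = ConditionFuser(fuse2cond={'cross': ['description'], 'prepend': ['self_wav']})
    transformer_input = torch.randn(2, 10, 64)                        # [B, T, D]
    conditions = {
        'description': (torch.randn(2, 5, 64), torch.ones(2, 5)),     # routed to cross-attention
        'self_wav': (torch.randn(2, 4, 64), torch.ones(2, 4)),        # prepended to the input
    }
    fused_input, cross_input = fuser(transformer_input, conditions)
    return fused_input.shape, cross_input.shape  # [2, 14, 64] and [2, 5, 64]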
| audiocraft-main | audiocraft/modules/conditioners.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""AudioDataset support. In order to handle a larger number of files
without having to rescan the folders, we precompute some metadata
(filename, sample rate, duration), and use that to efficiently sample audio segments.
"""
import argparse
import copy
from concurrent.futures import ThreadPoolExecutor, Future
from dataclasses import dataclass, fields
from contextlib import ExitStack
from functools import lru_cache
import gzip
import json
import logging
import os
from pathlib import Path
import random
import sys
import typing as tp
import torch
import torch.nn.functional as F
from .audio import audio_read, audio_info
from .audio_utils import convert_audio
from .zip import PathInZip
try:
import dora
except ImportError:
dora = None # type: ignore
@dataclass(order=True)
class BaseInfo:
@classmethod
def _dict2fields(cls, dictionary: dict):
return {
field.name: dictionary[field.name]
for field in fields(cls) if field.name in dictionary
}
@classmethod
def from_dict(cls, dictionary: dict):
_dictionary = cls._dict2fields(dictionary)
return cls(**_dictionary)
def to_dict(self):
return {
field.name: self.__getattribute__(field.name)
for field in fields(self)
}
@dataclass(order=True)
class AudioMeta(BaseInfo):
path: str
duration: float
sample_rate: int
amplitude: tp.Optional[float] = None
weight: tp.Optional[float] = None
# info_path is used to load additional information about the audio file that is stored in zip files.
info_path: tp.Optional[PathInZip] = None
@classmethod
def from_dict(cls, dictionary: dict):
base = cls._dict2fields(dictionary)
if 'info_path' in base and base['info_path'] is not None:
base['info_path'] = PathInZip(base['info_path'])
return cls(**base)
def to_dict(self):
d = super().to_dict()
if d['info_path'] is not None:
d['info_path'] = str(d['info_path'])
return d
@dataclass(order=True)
class SegmentInfo(BaseInfo):
meta: AudioMeta
seek_time: float
# The following values are given once the audio is processed, e.g.
# at the target sample rate and target number of channels.
n_frames: int # actual number of frames without padding
total_frames: int # total number of frames, padding included
sample_rate: int # actual sample rate
channels: int # number of audio channels.
DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a']
logger = logging.getLogger(__name__)
def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta:
"""AudioMeta from a path to an audio file.
Args:
file_path (str): Resolved path of valid audio file.
minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
Returns:
AudioMeta: Audio file path and its metadata.
"""
info = audio_info(file_path)
amplitude: tp.Optional[float] = None
if not minimal:
wav, sr = audio_read(file_path)
amplitude = wav.abs().max().item()
return AudioMeta(file_path, info.duration, info.sample_rate, amplitude)
def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta:
"""If Dora is available as a dependency, try to resolve potential relative paths
in list of AudioMeta. This method is expected to be used when loading meta from file.
Args:
m (AudioMeta): Audio meta to resolve.
fast (bool): If True, uses a really fast check for determining if a file
is already absolute or not. Only valid on Linux/Mac.
Returns:
AudioMeta: Audio meta with resolved path.
"""
def is_abs(m):
if fast:
return str(m)[0] == '/'
else:
            return os.path.isabs(str(m))
if not dora:
return m
if not is_abs(m.path):
m.path = dora.git_save.to_absolute_path(m.path)
if m.info_path is not None and not is_abs(m.info_path.zip_path):
        m.info_path.zip_path = dora.git_save.to_absolute_path(m.info_path.zip_path)
return m
def find_audio_files(path: tp.Union[Path, str],
exts: tp.List[str] = DEFAULT_EXTS,
resolve: bool = True,
minimal: bool = True,
progress: bool = False,
workers: int = 0) -> tp.List[AudioMeta]:
"""Build a list of AudioMeta from a given path,
collecting relevant audio files and fetching meta info.
Args:
path (str or Path): Path to folder containing audio files.
        exts (list of str): List of file extensions to consider for audio files.
        resolve (bool): Whether to resolve the audio file paths to absolute paths.
        minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
progress (bool): Whether to log progress on audio files collection.
workers (int): number of parallel workers, if 0, use only the current thread.
Returns:
list of AudioMeta: List of audio file path and its metadata.
"""
audio_files = []
futures: tp.List[Future] = []
pool: tp.Optional[ThreadPoolExecutor] = None
with ExitStack() as stack:
if workers > 0:
pool = ThreadPoolExecutor(workers)
stack.enter_context(pool)
if progress:
print("Finding audio files...")
for root, folders, files in os.walk(path, followlinks=True):
for file in files:
full_path = Path(root) / file
if full_path.suffix.lower() in exts:
audio_files.append(full_path)
if pool is not None:
futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal))
if progress:
print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr)
if progress:
print("Getting audio metadata...")
meta: tp.List[AudioMeta] = []
for idx, file_path in enumerate(audio_files):
try:
if pool is None:
m = _get_audio_meta(str(file_path), minimal)
else:
m = futures[idx].result()
if resolve:
m = _resolve_audio_meta(m)
except Exception as err:
print("Error with", str(file_path), err, file=sys.stderr)
continue
meta.append(m)
if progress:
print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr)
meta.sort()
return meta
def load_audio_meta(path: tp.Union[str, Path],
resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]:
"""Load list of AudioMeta from an optionally compressed json file.
Args:
path (str or Path): Path to JSON file.
resolve (bool): Whether to resolve the path from AudioMeta (default=True).
        fast (bool): Enables a faster absolute-path check when resolving paths (only valid on Linux/Mac).
    Returns:
        list of AudioMeta: List of audio file paths and their metadata.
"""
open_fn = gzip.open if str(path).lower().endswith('.gz') else open
with open_fn(path, 'rb') as fp: # type: ignore
lines = fp.readlines()
meta = []
for line in lines:
d = json.loads(line)
m = AudioMeta.from_dict(d)
if resolve:
m = _resolve_audio_meta(m, fast=fast)
meta.append(m)
return meta
def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]):
"""Save the audio metadata to the file pointer as json.
Args:
path (str or Path): Path to JSON file.
metadata (list of BaseAudioMeta): List of audio meta to save.
"""
Path(path).parent.mkdir(exist_ok=True, parents=True)
open_fn = gzip.open if str(path).lower().endswith('.gz') else open
with open_fn(path, 'wb') as fp: # type: ignore
for m in meta:
json_str = json.dumps(m.to_dict()) + '\n'
json_bytes = json_str.encode('utf-8')
fp.write(json_bytes)
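# --- Editor's illustrative sketch (not part of the original module) ---
# Writing and re-reading a tiny manifest; the file paths and metadata values below
# are made up for the example.
def _example_audio_meta_roundtrip(manifest_path: str = '/tmp/example_manifest.jsonl'):
    meta = [AudioMeta(path='/tmp/example.wav', duration=10.0, sample_rate=44_100)]
    save_audio_meta(manifest_path, meta)
    # resolve=False keeps the paths exactly as written in the manifest.
    return load_audio_meta(manifest_path, resolve=False)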
class AudioDataset:
"""Base audio dataset.
    The dataset takes a list of AudioMeta and creates a dataset composed of segments of audio
and potentially additional information, by creating random segments from the list of audio
files referenced in the metadata and applying minimal data pre-processing such as resampling,
mixing of channels, padding, etc.
If no segment_duration value is provided, the AudioDataset will return the full wav for each
audio file. Otherwise, it will randomly sample audio files and create a segment of the specified
duration, applying padding if required.
By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True
    returns a tuple containing the torch Tensor and additional metadata on the segment and the
original audio meta.
Note that you can call `start_epoch(epoch)` in order to get
a deterministic "randomization" for `shuffle=True`.
For a given epoch and dataset index, this will always return the same extract.
You can get back some diversity by setting the `shuffle_seed` param.
Args:
meta (list of AudioMeta): List of audio files metadata.
segment_duration (float, optional): Optional segment duration of audio to load.
If not specified, the dataset will load the full audio segment from the file.
shuffle (bool): Set to `True` to have the data reshuffled at every epoch.
sample_rate (int): Target sample rate of the loaded audio samples.
channels (int): Target number of channels of the loaded audio samples.
sample_on_duration (bool): Set to `True` to sample segments with probability
dependent on audio file duration. This is only used if `segment_duration` is provided.
sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of
`AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product
of the file duration and file weight. This is only used if `segment_duration` is provided.
min_segment_ratio (float): Minimum segment ratio to use when the audio file
is shorter than the desired segment.
max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset.
return_info (bool): Whether to return the wav only or return wav along with segment info and metadata.
min_audio_duration (float, optional): Minimum audio file duration, in seconds, if provided
audio shorter than this will be filtered out.
max_audio_duration (float, optional): Maximal audio file duration in seconds, if provided
audio longer than this will be filtered out.
        shuffle_seed (int): Seed that can be used to further randomize the deterministic per-epoch shuffling.
        load_wav (bool): if False, skip loading the wav and return a tensor of zeros
            with the expected segment_duration (which must be provided if load_wav is False).
permutation_on_files (bool): only if `sample_on_weight` and `sample_on_duration`
are False. Will ensure a permutation on files when going through the dataset.
In that case the epoch number must be provided in order for the model
to continue the permutation across epochs. In that case, it is assumed
that `num_samples = total_batch_size * num_updates_per_epoch`, with
`total_batch_size` the overall batch size accounting for all gpus.
"""
def __init__(self,
meta: tp.List[AudioMeta],
segment_duration: tp.Optional[float] = None,
shuffle: bool = True,
num_samples: int = 10_000,
sample_rate: int = 48_000,
channels: int = 2,
pad: bool = True,
sample_on_duration: bool = True,
sample_on_weight: bool = True,
min_segment_ratio: float = 0.5,
max_read_retry: int = 10,
return_info: bool = False,
min_audio_duration: tp.Optional[float] = None,
max_audio_duration: tp.Optional[float] = None,
shuffle_seed: int = 0,
load_wav: bool = True,
permutation_on_files: bool = False,
):
assert len(meta) > 0, "No audio meta provided to AudioDataset. Please check loading of audio meta."
assert segment_duration is None or segment_duration > 0
assert segment_duration is None or min_segment_ratio >= 0
self.segment_duration = segment_duration
self.min_segment_ratio = min_segment_ratio
self.max_audio_duration = max_audio_duration
self.min_audio_duration = min_audio_duration
if self.min_audio_duration is not None and self.max_audio_duration is not None:
assert self.min_audio_duration <= self.max_audio_duration
self.meta: tp.List[AudioMeta] = self._filter_duration(meta)
assert len(self.meta) # Fail fast if all data has been filtered.
self.total_duration = sum(d.duration for d in self.meta)
if segment_duration is None:
num_samples = len(self.meta)
self.num_samples = num_samples
self.shuffle = shuffle
self.sample_rate = sample_rate
self.channels = channels
self.pad = pad
self.sample_on_weight = sample_on_weight
self.sample_on_duration = sample_on_duration
self.sampling_probabilities = self._get_sampling_probabilities()
self.max_read_retry = max_read_retry
self.return_info = return_info
self.shuffle_seed = shuffle_seed
self.current_epoch: tp.Optional[int] = None
self.load_wav = load_wav
if not load_wav:
assert segment_duration is not None
self.permutation_on_files = permutation_on_files
if permutation_on_files:
assert not self.sample_on_duration
assert not self.sample_on_weight
assert self.shuffle
def start_epoch(self, epoch: int):
self.current_epoch = epoch
def __len__(self):
return self.num_samples
def _get_sampling_probabilities(self, normalized: bool = True):
"""Return the sampling probabilities for each file inside `self.meta`."""
scores: tp.List[float] = []
for file_meta in self.meta:
score = 1.
if self.sample_on_weight and file_meta.weight is not None:
score *= file_meta.weight
if self.sample_on_duration:
score *= file_meta.duration
scores.append(score)
probabilities = torch.tensor(scores)
if normalized:
probabilities /= probabilities.sum()
return probabilities
@staticmethod
@lru_cache(16)
def _get_file_permutation(num_files: int, permutation_index: int, base_seed: int):
        # Used to keep the most recent file permutations in memory implicitly.
        # Will work unless someone is using a lot of Datasets in parallel.
rng = torch.Generator()
rng.manual_seed(base_seed + permutation_index)
return torch.randperm(num_files, generator=rng)
def sample_file(self, index: int, rng: torch.Generator) -> AudioMeta:
"""Sample a given file from `self.meta`. Can be overridden in subclasses.
This is only called if `segment_duration` is not None.
You must use the provided random number generator `rng` for reproducibility.
        You can also make use of the provided dataset index.
"""
if self.permutation_on_files:
assert self.current_epoch is not None
total_index = self.current_epoch * len(self) + index
permutation_index = total_index // len(self.meta)
relative_index = total_index % len(self.meta)
permutation = AudioDataset._get_file_permutation(
len(self.meta), permutation_index, self.shuffle_seed)
file_index = permutation[relative_index]
return self.meta[file_index]
if not self.sample_on_weight and not self.sample_on_duration:
file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item())
else:
file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item())
return self.meta[file_index]
def _audio_read(self, path: str, seek_time: float = 0, duration: float = -1):
# Override this method in subclass if needed.
if self.load_wav:
return audio_read(path, seek_time, duration, pad=False)
else:
assert self.segment_duration is not None
n_frames = int(self.sample_rate * self.segment_duration)
return torch.zeros(self.channels, n_frames), self.sample_rate
def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]:
if self.segment_duration is None:
file_meta = self.meta[index]
out, sr = audio_read(file_meta.path)
out = convert_audio(out, sr, self.sample_rate, self.channels)
n_frames = out.shape[-1]
segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames,
sample_rate=self.sample_rate, channels=out.shape[0])
else:
rng = torch.Generator()
if self.shuffle:
# We use index, plus extra randomness, either totally random if we don't know the epoch.
# otherwise we make use of the epoch number and optional shuffle_seed.
if self.current_epoch is None:
rng.manual_seed(index + self.num_samples * random.randint(0, 2**24))
else:
rng.manual_seed(index + self.num_samples * (self.current_epoch + self.shuffle_seed))
else:
# We only use index
rng.manual_seed(index)
for retry in range(self.max_read_retry):
file_meta = self.sample_file(index, rng)
# We add some variance in the file position even if audio file is smaller than segment
# without ending up with empty segments
max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio)
seek_time = torch.rand(1, generator=rng).item() * max_seek
try:
out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False)
out = convert_audio(out, sr, self.sample_rate, self.channels)
n_frames = out.shape[-1]
target_frames = int(self.segment_duration * self.sample_rate)
if self.pad:
out = F.pad(out, (0, target_frames - n_frames))
segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames,
sample_rate=self.sample_rate, channels=out.shape[0])
except Exception as exc:
logger.warning("Error opening file %s: %r", file_meta.path, exc)
if retry == self.max_read_retry - 1:
raise
else:
break
if self.return_info:
# Returns the wav and additional information on the wave segment
return out, segment_info
else:
return out
def collater(self, samples):
"""The collater function has to be provided to the dataloader
if AudioDataset has return_info=True in order to properly collate
the samples of a batch.
"""
if self.segment_duration is None and len(samples) > 1:
assert self.pad, "Must allow padding when batching examples of different durations."
# In this case the audio reaching the collater is of variable length as segment_duration=None.
to_pad = self.segment_duration is None and self.pad
if to_pad:
max_len = max([wav.shape[-1] for wav, _ in samples])
def _pad_wav(wav):
return F.pad(wav, (0, max_len - wav.shape[-1]))
if self.return_info:
if len(samples) > 0:
assert len(samples[0]) == 2
assert isinstance(samples[0][0], torch.Tensor)
assert isinstance(samples[0][1], SegmentInfo)
wavs = [wav for wav, _ in samples]
segment_infos = [copy.deepcopy(info) for _, info in samples]
if to_pad:
# Each wav could be of a different duration as they are not segmented.
for i in range(len(samples)):
# Determines the total length of the signal with padding, so we update here as we pad.
segment_infos[i].total_frames = max_len
wavs[i] = _pad_wav(wavs[i])
wav = torch.stack(wavs)
return wav, segment_infos
else:
assert isinstance(samples[0], torch.Tensor)
if to_pad:
samples = [_pad_wav(s) for s in samples]
return torch.stack(samples)
def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]:
"""Filters out audio files with audio durations that will not allow to sample examples from them."""
orig_len = len(meta)
# Filter data that is too short.
if self.min_audio_duration is not None:
meta = [m for m in meta if m.duration >= self.min_audio_duration]
# Filter data that is too long.
if self.max_audio_duration is not None:
meta = [m for m in meta if m.duration <= self.max_audio_duration]
filtered_len = len(meta)
removed_percentage = 100*(1-float(filtered_len)/orig_len)
msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage
if removed_percentage < 10:
logging.debug(msg)
else:
logging.warning(msg)
return meta
@classmethod
def from_meta(cls, root: tp.Union[str, Path], **kwargs):
"""Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file.
Args:
root (str or Path): Path to root folder containing audio files.
kwargs: Additional keyword arguments for the AudioDataset.
"""
root = Path(root)
if root.is_dir():
if (root / 'data.jsonl').exists():
root = root / 'data.jsonl'
elif (root / 'data.jsonl.gz').exists():
root = root / 'data.jsonl.gz'
else:
raise ValueError("Don't know where to read metadata from in the dir. "
"Expecting either a data.jsonl or data.jsonl.gz file but none found.")
meta = load_audio_meta(root)
return cls(meta, **kwargs)
@classmethod
def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True,
exts: tp.List[str] = DEFAULT_EXTS, **kwargs):
"""Instantiate AudioDataset from a path containing (possibly nested) audio files.
Args:
            root (str or Path): Path to the root folder containing audio files, or to an existing metadata file.
minimal_meta (bool): Whether to only load minimal metadata or not.
exts (list of str): Extensions for audio files.
kwargs: Additional keyword arguments for the AudioDataset.
"""
root = Path(root)
if root.is_file():
meta = load_audio_meta(root, resolve=True)
else:
meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True)
return cls(meta, **kwargs)
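# --- Editor's illustrative sketch (not part of the original module) ---
# Building a dataset of 1-second mono segments from a folder of audio files and
# batching it with the provided collater; the folder path is a placeholder.
def _example_audio_dataset(root: str = '/path/to/audio_folder'):
    from torch.utils.data import DataLoader
    dataset = AudioDataset.from_path(
        root, segment_duration=1.0, sample_rate=16_000, channels=1,
        num_samples=100, return_info=True)
    loader = DataLoader(dataset, batch_size=4, collate_fn=dataset.collater)
    wav, infos = next(iter(loader))  # wav: [4, 1, 16000], infos: list of SegmentInfo
    return wav, infos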
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
parser = argparse.ArgumentParser(
prog='audio_dataset',
description='Generate .jsonl files by scanning a folder.')
parser.add_argument('root', help='Root folder with all the audio files')
parser.add_argument('output_meta_file',
                        help='Output file to store the metadata.')
parser.add_argument('--complete',
action='store_false', dest='minimal', default=True,
                        help='Retrieve all metadata, even the ones that are expensive '
'to compute (e.g. normalization).')
parser.add_argument('--resolve',
action='store_true', default=False,
help='Resolve the paths to be absolute and with no symlinks.')
parser.add_argument('--workers',
default=10, type=int,
help='Number of workers.')
args = parser.parse_args()
meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True,
resolve=args.resolve, minimal=args.minimal, workers=args.workers)
save_audio_meta(args.output_meta_file, meta)
if __name__ == '__main__':
main()
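# Example invocation (editor's note; paths are placeholders):
#   python -m audiocraft.data.audio_dataset /data/my_audio egs/my_audio/data.jsonl.gz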
| audiocraft-main | audiocraft/data/audio_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Audio loading and writing support. Datasets for raw audio
or also including some metadata."""
# flake8: noqa
from . import audio, audio_dataset, info_audio_dataset, music_dataset, sound_dataset
| audiocraft-main | audiocraft/data/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Dataset of music tracks with rich metadata.
"""
from dataclasses import dataclass, field, fields, replace
import gzip
import json
import logging
from pathlib import Path
import random
import typing as tp
import torch
from .info_audio_dataset import (
InfoAudioDataset,
AudioInfo,
get_keyword_list,
get_keyword,
get_string
)
from ..modules.conditioners import (
ConditioningAttributes,
JointEmbedCondition,
WavCondition,
)
from ..utils.utils import warn_once
logger = logging.getLogger(__name__)
@dataclass
class MusicInfo(AudioInfo):
"""Segment info augmented with music metadata.
"""
# music-specific metadata
title: tp.Optional[str] = None
artist: tp.Optional[str] = None # anonymized artist id, used to ensure no overlap between splits
key: tp.Optional[str] = None
bpm: tp.Optional[float] = None
genre: tp.Optional[str] = None
moods: tp.Optional[list] = None
keywords: tp.Optional[list] = None
description: tp.Optional[str] = None
name: tp.Optional[str] = None
instrument: tp.Optional[str] = None
# original wav accompanying the metadata
self_wav: tp.Optional[WavCondition] = None
    # dict mapping attribute names to tuples of wav, text and metadata
joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict)
@property
def has_music_meta(self) -> bool:
return self.name is not None
def to_condition_attributes(self) -> ConditioningAttributes:
out = ConditioningAttributes()
for _field in fields(self):
key, value = _field.name, getattr(self, _field.name)
if key == 'self_wav':
out.wav[key] = value
elif key == 'joint_embed':
for embed_attribute, embed_cond in value.items():
out.joint_embed[embed_attribute] = embed_cond
else:
if isinstance(value, list):
value = ' '.join(value)
out.text[key] = value
return out
@staticmethod
def attribute_getter(attribute):
if attribute == 'bpm':
preprocess_func = get_bpm
elif attribute == 'key':
preprocess_func = get_musical_key
elif attribute in ['moods', 'keywords']:
preprocess_func = get_keyword_list
elif attribute in ['genre', 'name', 'instrument']:
preprocess_func = get_keyword
elif attribute in ['title', 'artist', 'description']:
preprocess_func = get_string
else:
preprocess_func = None
return preprocess_func
@classmethod
def from_dict(cls, dictionary: dict, fields_required: bool = False):
_dictionary: tp.Dict[str, tp.Any] = {}
# allow a subset of attributes to not be loaded from the dictionary
# these attributes may be populated later
post_init_attributes = ['self_wav', 'joint_embed']
optional_fields = ['keywords']
for _field in fields(cls):
if _field.name in post_init_attributes:
continue
elif _field.name not in dictionary:
if fields_required and _field.name not in optional_fields:
raise KeyError(f"Unexpected missing key: {_field.name}")
else:
preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name)
value = dictionary[_field.name]
if preprocess_func:
value = preprocess_func(value)
_dictionary[_field.name] = value
return cls(**_dictionary)
def augment_music_info_description(music_info: MusicInfo, merge_text_p: float = 0.,
drop_desc_p: float = 0., drop_other_p: float = 0.) -> MusicInfo:
"""Augment MusicInfo description with additional metadata fields and potential dropout.
    Additional textual attributes are added with probability 'merge_text_p' and
the original textual description is dropped from the augmented description given probability drop_desc_p.
Args:
music_info (MusicInfo): The music metadata to augment.
merge_text_p (float): Probability of merging additional metadata to the description.
If provided value is 0, then no merging is performed.
drop_desc_p (float): Probability of dropping the original description on text merge.
            If the provided value is 0, then no dropout is performed.
drop_other_p (float): Probability of dropping the other fields used for text augmentation.
Returns:
MusicInfo: The MusicInfo with augmented textual description.
"""
def is_valid_field(field_name: str, field_value: tp.Any) -> bool:
valid_field_name = field_name in ['key', 'bpm', 'genre', 'moods', 'instrument', 'keywords']
valid_field_value = field_value is not None and isinstance(field_value, (int, float, str, list))
keep_field = random.uniform(0, 1) < drop_other_p
return valid_field_name and valid_field_value and keep_field
def process_value(v: tp.Any) -> str:
if isinstance(v, (int, float, str)):
return str(v)
if isinstance(v, list):
return ", ".join(v)
else:
raise ValueError(f"Unknown type for text value! ({type(v), v})")
description = music_info.description
metadata_text = ""
if random.uniform(0, 1) < merge_text_p:
meta_pairs = [f'{_field.name}: {process_value(getattr(music_info, _field.name))}'
for _field in fields(music_info) if is_valid_field(_field.name, getattr(music_info, _field.name))]
random.shuffle(meta_pairs)
metadata_text = ". ".join(meta_pairs)
description = description if not random.uniform(0, 1) < drop_desc_p else None
logger.debug(f"Applying text augmentation on MMI info. description: {description}, metadata: {metadata_text}")
if description is None:
description = metadata_text if len(metadata_text) > 1 else None
else:
description = ". ".join([description.rstrip('.'), metadata_text])
description = description.strip() if description else None
music_info = replace(music_info)
music_info.description = description
return music_info
class Paraphraser:
def __init__(self, paraphrase_source: tp.Union[str, Path], paraphrase_p: float = 0.):
self.paraphrase_p = paraphrase_p
open_fn = gzip.open if str(paraphrase_source).lower().endswith('.gz') else open
with open_fn(paraphrase_source, 'rb') as f: # type: ignore
self.paraphrase_source = json.loads(f.read())
logger.info(f"loaded paraphrasing source from: {paraphrase_source}")
def sample_paraphrase(self, audio_path: str, description: str):
if random.random() >= self.paraphrase_p:
return description
        info_path = str(Path(audio_path).with_suffix('.json'))
if info_path not in self.paraphrase_source:
warn_once(logger, f"{info_path} not in paraphrase source!")
return description
new_desc = random.choice(self.paraphrase_source[info_path])
logger.debug(f"{description} -> {new_desc}")
return new_desc
class MusicDataset(InfoAudioDataset):
"""Music dataset is an AudioDataset with music-related metadata.
Args:
info_fields_required (bool): Whether to enforce having required fields.
merge_text_p (float): Probability of merging additional metadata to the description.
drop_desc_p (float): Probability of dropping the original description on text merge.
drop_other_p (float): Probability of dropping the other fields used for text augmentation.
joint_embed_attributes (list[str]): A list of attributes for which joint embedding metadata is returned.
paraphrase_source (str, optional): Path to the .json or .json.gz file containing the
            paraphrases for the description. The json should be a dict whose keys are the
            original info paths (e.g. track_path.json) and whose values are lists of
            possible paraphrases.
paraphrase_p (float): probability of taking a paraphrase.
See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments.
"""
def __init__(self, *args, info_fields_required: bool = True,
merge_text_p: float = 0., drop_desc_p: float = 0., drop_other_p: float = 0.,
joint_embed_attributes: tp.List[str] = [],
paraphrase_source: tp.Optional[str] = None, paraphrase_p: float = 0,
**kwargs):
kwargs['return_info'] = True # We require the info for each song of the dataset.
super().__init__(*args, **kwargs)
self.info_fields_required = info_fields_required
self.merge_text_p = merge_text_p
self.drop_desc_p = drop_desc_p
self.drop_other_p = drop_other_p
self.joint_embed_attributes = joint_embed_attributes
self.paraphraser = None
if paraphrase_source is not None:
self.paraphraser = Paraphraser(paraphrase_source, paraphrase_p)
def __getitem__(self, index):
wav, info = super().__getitem__(index)
info_data = info.to_dict()
music_info_path = Path(info.meta.path).with_suffix('.json')
if Path(music_info_path).exists():
with open(music_info_path, 'r') as json_file:
music_data = json.load(json_file)
music_data.update(info_data)
music_info = MusicInfo.from_dict(music_data, fields_required=self.info_fields_required)
if self.paraphraser is not None:
                music_info.description = self.paraphraser.sample_paraphrase(
                    music_info.meta.path, music_info.description)
if self.merge_text_p:
music_info = augment_music_info_description(
music_info, self.merge_text_p, self.drop_desc_p, self.drop_other_p)
else:
music_info = MusicInfo.from_dict(info_data, fields_required=False)
music_info.self_wav = WavCondition(
wav=wav[None], length=torch.tensor([info.n_frames]),
sample_rate=[info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
for att in self.joint_embed_attributes:
att_value = getattr(music_info, att)
joint_embed_cond = JointEmbedCondition(
wav[None], [att_value], torch.tensor([info.n_frames]),
sample_rate=[info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
music_info.joint_embed[att] = joint_embed_cond
return wav, music_info
def get_musical_key(value: tp.Optional[str]) -> tp.Optional[str]:
"""Preprocess key keywords, discarding them if there are multiple key defined."""
if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
return None
elif ',' in value:
        # For now, we discard when multiple keys are defined, separated with commas
return None
else:
return value.strip().lower()
def get_bpm(value: tp.Optional[str]) -> tp.Optional[float]:
"""Preprocess to a float."""
if value is None:
return None
try:
return float(value)
except ValueError:
return None
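def _example_metadata_preprocessing():
    """Illustrative sketch only (not part of the original module): shows how the
    preprocessing helpers above normalize raw music metadata values.
    """
    assert get_musical_key(' C# Minor ') == 'c# minor'
    assert get_musical_key('C, G') is None  # multiple keys are discarded
    assert get_bpm('120.5') == 120.5
    assert get_bpm('not-a-number') is None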
| audiocraft-main | audiocraft/data/music_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Utility for reading some info from inside a zip file.
"""
import typing
import zipfile
from dataclasses import dataclass
from functools import lru_cache
from typing_extensions import Literal
DEFAULT_SIZE = 32
MODE = Literal['r', 'w', 'x', 'a']
@dataclass(order=True)
class PathInZip:
"""Hold a path of file within a zip file.
Args:
path (str): The convention is <path_to_zip>:<relative_path_inside_zip>.
Let's assume there is a zip file /some/location/foo.zip
and inside of it is a json file located at /data/file1.json,
Then we expect path = "/some/location/foo.zip:/data/file1.json".
"""
INFO_PATH_SEP = ':'
zip_path: str
file_path: str
def __init__(self, path: str) -> None:
split_path = path.split(self.INFO_PATH_SEP)
assert len(split_path) == 2
self.zip_path, self.file_path = split_path
@classmethod
def from_paths(cls, zip_path: str, file_path: str):
return cls(zip_path + cls.INFO_PATH_SEP + file_path)
def __str__(self) -> str:
return self.zip_path + self.INFO_PATH_SEP + self.file_path
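def _example_path_in_zip():
    """Illustrative sketch only (not part of the original module): demonstrates the
    "<path_to_zip>:<relative_path_inside_zip>" convention used by PathInZip.
    """
    path = PathInZip('/some/location/foo.zip:/data/file1.json')
    assert path.zip_path == '/some/location/foo.zip'
    assert path.file_path == '/data/file1.json'
    assert str(PathInZip.from_paths(path.zip_path, path.file_path)) == str(path)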
def _open_zip(path: str, mode: MODE = 'r'):
return zipfile.ZipFile(path, mode)
_cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip)
def set_zip_cache_size(max_size: int):
"""Sets the maximal LRU caching for zip file opening.
Args:
max_size (int): the maximal LRU cache.
"""
global _cached_open_zip
_cached_open_zip = lru_cache(max_size)(_open_zip)
def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO:
"""Opens a file stored inside a zip and returns a file-like object.
Args:
path_in_zip (PathInZip): A PathInZip object representing the file to return a file-like object of.
mode (str): The mode in which to open the file with.
Returns:
A file-like object for PathInZip.
"""
zf = _cached_open_zip(path_in_zip.zip_path)
return zf.open(path_in_zip.file_path)
| audiocraft-main | audiocraft/data/zip.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Various utilities for audio convertion (pcm format, sample rate and channels),
and volume normalization."""
import sys
import typing as tp
import julius
import torch
import torchaudio
def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor:
"""Convert audio to the given number of channels.
Args:
wav (torch.Tensor): Audio wave of shape [B, C, T].
channels (int): Expected number of channels as output.
Returns:
torch.Tensor: Downmixed or unchanged audio wave [B, C, T].
"""
*shape, src_channels, length = wav.shape
if src_channels == channels:
pass
elif channels == 1:
# Case 1:
# The caller asked 1-channel audio, and the stream has multiple
# channels, downmix all channels.
wav = wav.mean(dim=-2, keepdim=True)
elif src_channels == 1:
# Case 2:
# The caller asked for multiple channels, but the input file has
# a single channel, replicate the audio over all channels.
wav = wav.expand(*shape, channels, length)
elif src_channels >= channels:
# Case 3:
# The caller asked for multiple channels, and the input file has
# more channels than requested. In that case return the first channels.
wav = wav[..., :channels, :]
else:
# Case 4: What is a reasonable choice here?
        raise ValueError('The audio file has fewer channels than requested but is not mono.')
return wav
def convert_audio(wav: torch.Tensor, from_rate: float,
to_rate: float, to_channels: int) -> torch.Tensor:
"""Convert audio to new sample rate and number of audio channels."""
wav = julius.resample_frac(wav, int(from_rate), int(to_rate))
wav = convert_audio_channels(wav, to_channels)
return wav
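def _example_convert_audio():
    """Illustrative sketch only (not part of the original module): downmixes a dummy
    stereo batch to mono while resampling from 48 kHz to 32 kHz. Shapes and rates are
    assumptions made for this example.
    """
    wav = torch.randn(1, 2, 48_000)  # [B, C, T], one second of stereo noise
    out = convert_audio(wav, from_rate=48_000, to_rate=32_000, to_channels=1)
    assert out.shape[:-1] == (1, 1)  # batch kept, downmixed to mono
    return out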
def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14,
loudness_compressor: bool = False, energy_floor: float = 2e-3):
"""Normalize an input signal to a user loudness in dB LKFS.
Audio loudness is defined according to the ITU-R BS.1770-4 recommendation.
Args:
wav (torch.Tensor): Input multichannel audio data.
sample_rate (int): Sample rate.
loudness_headroom_db (float): Target loudness of the output in dB LUFS.
loudness_compressor (bool): Uses tanh for soft clipping.
energy_floor (float): anything below that RMS level will not be rescaled.
Returns:
torch.Tensor: Loudness normalized output data.
"""
energy = wav.pow(2).mean().sqrt().item()
if energy < energy_floor:
return wav
transform = torchaudio.transforms.Loudness(sample_rate)
input_loudness_db = transform(wav).item()
# calculate the gain needed to scale to the desired loudness level
delta_loudness = -loudness_headroom_db - input_loudness_db
gain = 10.0 ** (delta_loudness / 20.0)
output = gain * wav
if loudness_compressor:
output = torch.tanh(output)
assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt())
return output
def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None:
"""Utility function to clip the audio with logging if specified."""
max_scale = wav.abs().max()
if log_clipping and max_scale > 1:
clamp_prob = (wav.abs() > 1).float().mean().item()
print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):",
clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr)
wav.clamp_(-1, 1)
def normalize_audio(wav: torch.Tensor, normalize: bool = True,
strategy: str = 'peak', peak_clip_headroom_db: float = 1,
rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
loudness_compressor: bool = False, log_clipping: bool = False,
sample_rate: tp.Optional[int] = None,
stem_name: tp.Optional[str] = None) -> torch.Tensor:
"""Normalize the audio according to the prescribed strategy (see after).
Args:
wav (torch.Tensor): Audio data.
normalize (bool): if `True` (default), normalizes according to the prescribed
strategy (see after). If `False`, the strategy is only used in case clipping
would happen.
strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
with extra headroom to avoid clipping. 'clip' just clips.
peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
than the `peak_clip` one to avoid further clipping.
loudness_headroom_db (float): Target loudness for loudness normalization.
loudness_compressor (bool): If True, uses tanh based soft clipping.
log_clipping (bool): If True, basic logging on stderr when clipping still
occurs despite strategy (only for 'rms').
sample_rate (int): Sample rate for the audio data (required for loudness).
stem_name (str, optional): Stem name for clipping logging.
Returns:
torch.Tensor: Normalized audio.
"""
scale_peak = 10 ** (-peak_clip_headroom_db / 20)
scale_rms = 10 ** (-rms_headroom_db / 20)
if strategy == 'peak':
rescaling = (scale_peak / wav.abs().max())
if normalize or rescaling < 1:
wav = wav * rescaling
elif strategy == 'clip':
wav = wav.clamp(-scale_peak, scale_peak)
elif strategy == 'rms':
mono = wav.mean(dim=0)
rescaling = scale_rms / mono.pow(2).mean().sqrt()
if normalize or rescaling < 1:
wav = wav * rescaling
_clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
elif strategy == 'loudness':
assert sample_rate is not None, "Loudness normalization requires sample rate."
wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor)
_clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
else:
assert wav.abs().max() < 1
assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'"
return wav
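def _example_normalize_strategies():
    """Illustrative sketch only (not part of the original module): applies the different
    normalization strategies above to the same dummy signal. The 'loudness' strategy
    additionally requires the sample rate (assumed to be 16 kHz here).
    """
    wav = torch.randn(2, 16_000) * 0.1  # [C, T], one second of quiet stereo noise
    peak = normalize_audio(wav, strategy='peak')
    clipped = normalize_audio(wav, strategy='clip')
    rms = normalize_audio(wav, strategy='rms')
    loud = normalize_audio(wav, strategy='loudness', sample_rate=16_000)
    return peak, clipped, rms, loud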
def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
"""Convert audio to float 32 bits PCM format.
"""
if wav.dtype.is_floating_point:
return wav
elif wav.dtype == torch.int16:
return wav.float() / 2**15
elif wav.dtype == torch.int32:
return wav.float() / 2**31
raise ValueError(f"Unsupported wav dtype: {wav.dtype}")
def i16_pcm(wav: torch.Tensor) -> torch.Tensor:
"""Convert audio to int 16 bits PCM format.
    ..Warning:: There exist many formulas for doing this conversion. None are perfect
    due to the asymmetry of the int16 range. One either has possible clipping, DC offset,
    or inconsistencies with f32_pcm. If the given wav doesn't have enough headroom,
    it is possible that `i16_pcm(f32_pcm(wav)) != wav`.
"""
if wav.dtype.is_floating_point:
assert wav.abs().max() <= 1
candidate = (wav * 2 ** 15).round()
if candidate.max() >= 2 ** 15: # clipping would occur
candidate = (wav * (2 ** 15 - 1)).round()
return candidate.short()
else:
assert wav.dtype == torch.int16
return wav
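def _example_pcm_roundtrip():
    """Illustrative sketch only (not part of the original module): with enough headroom,
    the int16 <-> float32 conversions above round-trip within one quantization step.
    """
    wav = torch.randn(1, 1_000).clamp(-1, 1) * 0.8
    restored = f32_pcm(i16_pcm(wav))
    assert (restored - wav).abs().max() <= 1 / 2 ** 15
    return restored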
| audiocraft-main | audiocraft/data/audio_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Base classes for the datasets that also provide non-audio metadata,
e.g. description, text transcription etc.
"""
from dataclasses import dataclass
import logging
import math
import re
import typing as tp
import torch
from .audio_dataset import AudioDataset, AudioMeta
from ..environment import AudioCraftEnvironment
from ..modules.conditioners import SegmentWithAttributes, ConditioningAttributes
logger = logging.getLogger(__name__)
def _clusterify_meta(meta: AudioMeta) -> AudioMeta:
"""Monkey-patch meta to match cluster specificities."""
meta.path = AudioCraftEnvironment.apply_dataset_mappers(meta.path)
if meta.info_path is not None:
meta.info_path.zip_path = AudioCraftEnvironment.apply_dataset_mappers(meta.info_path.zip_path)
return meta
def clusterify_all_meta(meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]:
"""Monkey-patch all meta to match cluster specificities."""
return [_clusterify_meta(m) for m in meta]
@dataclass
class AudioInfo(SegmentWithAttributes):
"""Dummy SegmentInfo with empty attributes.
The InfoAudioDataset is expected to return metadata that inherits
from SegmentWithAttributes class and can return conditioning attributes.
This basically guarantees all datasets will be compatible with current
solver that contain conditioners requiring this.
"""
audio_tokens: tp.Optional[torch.Tensor] = None # populated when using cached batch for training a LM.
def to_condition_attributes(self) -> ConditioningAttributes:
return ConditioningAttributes()
class InfoAudioDataset(AudioDataset):
"""AudioDataset that always returns metadata as SegmentWithAttributes along with the audio waveform.
See `audiocraft.data.audio_dataset.AudioDataset` for initialization arguments.
"""
def __init__(self, meta: tp.List[AudioMeta], **kwargs):
super().__init__(clusterify_all_meta(meta), **kwargs)
def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentWithAttributes]]:
if not self.return_info:
wav = super().__getitem__(index)
assert isinstance(wav, torch.Tensor)
return wav
wav, meta = super().__getitem__(index)
return wav, AudioInfo(**meta.to_dict())
def get_keyword_or_keyword_list(value: tp.Optional[str]) -> tp.Union[tp.Optional[str], tp.Optional[tp.List[str]]]:
"""Preprocess a single keyword or possible a list of keywords."""
if isinstance(value, list):
return get_keyword_list(value)
else:
return get_keyword(value)
def get_string(value: tp.Optional[str]) -> tp.Optional[str]:
"""Preprocess a single keyword."""
if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
return None
else:
return value.strip()
def get_keyword(value: tp.Optional[str]) -> tp.Optional[str]:
"""Preprocess a single keyword."""
if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
return None
else:
return value.strip().lower()
def get_keyword_list(values: tp.Union[str, tp.List[str]]) -> tp.Optional[tp.List[str]]:
"""Preprocess a list of keywords."""
if isinstance(values, str):
values = [v.strip() for v in re.split(r'[,\s]', values)]
elif isinstance(values, float) and math.isnan(values):
values = []
if not isinstance(values, list):
logger.debug(f"Unexpected keyword list {values}")
values = [str(values)]
kws = [get_keyword(v) for v in values]
kw_list = [k for k in kws if k is not None]
if len(kw_list) == 0:
return None
else:
return kw_list
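def _example_keyword_preprocessing():
    """Illustrative sketch only (not part of the original module): shows how the keyword
    helpers above normalize raw metadata values.
    """
    assert get_keyword(' Rock ') == 'rock'
    assert get_keyword('None') is None
    assert get_keyword_list('rock, pop jazz') == ['rock', 'pop', 'jazz']
    assert get_keyword_or_keyword_list(['Happy', 'Sad']) == ['happy', 'sad']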
| audiocraft-main | audiocraft/data/info_audio_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Audio IO methods are defined in this module (info, read, write).
We rely on the av library for faster reads when possible, otherwise on torchaudio.
"""
from dataclasses import dataclass
from pathlib import Path
import logging
import typing as tp
import numpy as np
import soundfile
import torch
from torch.nn import functional as F
import torchaudio as ta
import av
from .audio_utils import f32_pcm, i16_pcm, normalize_audio
_av_initialized = False
def _init_av():
global _av_initialized
if _av_initialized:
return
logger = logging.getLogger('libav.mp3')
logger.setLevel(logging.ERROR)
_av_initialized = True
@dataclass(frozen=True)
class AudioFileInfo:
sample_rate: int
duration: float
channels: int
def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
_init_av()
with av.open(str(filepath)) as af:
stream = af.streams.audio[0]
sample_rate = stream.codec_context.sample_rate
duration = float(stream.duration * stream.time_base)
channels = stream.channels
return AudioFileInfo(sample_rate, duration, channels)
def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
info = soundfile.info(filepath)
return AudioFileInfo(info.samplerate, info.duration, info.channels)
def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
    # torchaudio no longer returns useful duration information for some formats like mp3s.
filepath = Path(filepath)
if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info
# ffmpeg has some weird issue with flac.
return _soundfile_info(filepath)
else:
return _av_info(filepath)
def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]:
"""FFMPEG-based audio file reading using PyAV bindings.
Soundfile cannot read mp3 and av_read is more efficient than torchaudio.
Args:
filepath (str or Path): Path to audio file to read.
seek_time (float): Time at which to start reading in the file.
duration (float): Duration to read from the file. If set to -1, the whole file is read.
Returns:
tuple of torch.Tensor, int: Tuple containing audio data and sample rate
"""
_init_av()
with av.open(str(filepath)) as af:
stream = af.streams.audio[0]
sr = stream.codec_context.sample_rate
num_frames = int(sr * duration) if duration >= 0 else -1
frame_offset = int(sr * seek_time)
# we need a small negative offset otherwise we get some edge artifact
# from the mp3 decoder.
af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream)
frames = []
length = 0
for frame in af.decode(streams=stream.index):
current_offset = int(frame.rate * frame.pts * frame.time_base)
strip = max(0, frame_offset - current_offset)
buf = torch.from_numpy(frame.to_ndarray())
if buf.shape[0] != stream.channels:
buf = buf.view(-1, stream.channels).t()
buf = buf[:, strip:]
frames.append(buf)
length += buf.shape[1]
if num_frames > 0 and length >= num_frames:
break
assert frames
# If the above assert fails, it is likely because we seeked past the end of file point,
# in which case ffmpeg returns a single frame with only zeros, and a weird timestamp.
# This will need proper debugging, in due time.
wav = torch.cat(frames, dim=1)
assert wav.shape[0] == stream.channels
if num_frames > 0:
wav = wav[:, :num_frames]
return f32_pcm(wav), sr
def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,
duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:
"""Read audio by picking the most appropriate backend tool based on the audio format.
Args:
filepath (str or Path): Path to audio file to read.
seek_time (float): Time at which to start reading in the file.
duration (float): Duration to read from the file. If set to -1, the whole file is read.
pad (bool): Pad output audio if not reaching expected duration.
Returns:
tuple of torch.Tensor, int: Tuple containing audio data and sample rate.
"""
fp = Path(filepath)
if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg
# There is some bug with ffmpeg and reading flac
info = _soundfile_info(filepath)
frames = -1 if duration <= 0 else int(duration * info.sample_rate)
frame_offset = int(seek_time * info.sample_rate)
wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)
assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}"
wav = torch.from_numpy(wav).t().contiguous()
if len(wav.shape) == 1:
wav = torch.unsqueeze(wav, 0)
elif (
fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats()
and duration <= 0 and seek_time == 0
):
# Torchaudio is faster if we load an entire file at once.
wav, sr = ta.load(fp)
else:
wav, sr = _av_read(filepath, seek_time, duration)
if pad and duration > 0:
expected_frames = int(duration * sr)
wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))
return wav, sr
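def _example_audio_read(filepath: tp.Union[str, Path]):
    """Illustrative sketch only (not part of the original module): `filepath` is assumed
    to point at an existing audio file that is at least a few seconds long.
    """
    info = audio_info(filepath)
    # read two seconds starting at one second, padding if the file is shorter
    wav, sr = audio_read(filepath, seek_time=1.0, duration=2.0, pad=True)
    assert sr == info.sample_rate
    assert wav.shape[-1] == int(2.0 * sr)
    return wav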
def audio_write(stem_name: tp.Union[str, Path],
wav: torch.Tensor, sample_rate: int,
format: str = 'wav', mp3_rate: int = 320, normalize: bool = True,
strategy: str = 'peak', peak_clip_headroom_db: float = 1,
rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
loudness_compressor: bool = False,
log_clipping: bool = True, make_parent_dir: bool = True,
add_suffix: bool = True) -> Path:
"""Convenience function for saving audio to disk. Returns the filename the audio was written to.
Args:
stem_name (str or Path): Filename without extension which will be added automatically.
wav (torch.Tensor): Audio data to save.
sample_rate (int): Sample rate of audio data.
format (str): Either "wav" or "mp3".
mp3_rate (int): kbps when using mp3s.
normalize (bool): if `True` (default), normalizes according to the prescribed
strategy (see after). If `False`, the strategy is only used in case clipping
would happen.
strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
with extra headroom to avoid clipping. 'clip' just clips.
peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
than the `peak_clip` one to avoid further clipping.
loudness_headroom_db (float): Target loudness for loudness normalization.
        loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.
        log_clipping (bool): If True, basic logging on stderr when clipping still
            occurs despite strategy (only for 'rms').
        make_parent_dir (bool): Make parent directory if it doesn't exist.
        add_suffix (bool): If True (default), the format suffix ('.wav' or '.mp3') is appended to `stem_name`.
Returns:
Path: Path of the saved audio.
"""
assert wav.dtype.is_floating_point, "wav is not floating point"
if wav.dim() == 1:
wav = wav[None]
elif wav.dim() > 2:
raise ValueError("Input wav should be at most 2 dimension.")
assert wav.isfinite().all()
wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,
rms_headroom_db, loudness_headroom_db, loudness_compressor,
log_clipping=log_clipping, sample_rate=sample_rate,
stem_name=str(stem_name))
kwargs: dict = {}
if format == 'mp3':
suffix = '.mp3'
kwargs.update({"compression": mp3_rate})
elif format == 'wav':
wav = i16_pcm(wav)
suffix = '.wav'
kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16})
else:
raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.")
if not add_suffix:
suffix = ''
path = Path(str(stem_name) + suffix)
if make_parent_dir:
path.parent.mkdir(exist_ok=True, parents=True)
try:
ta.save(path, wav, sample_rate, **kwargs)
except Exception:
if path.exists():
# we do not want to leave half written files around.
path.unlink()
raise
return path
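def _example_audio_write(output_dir: tp.Union[str, Path]):
    """Illustrative sketch only (not part of the original module): writes one second of
    quiet noise as a 16-bit PCM wav under `output_dir`, an assumed writable folder.
    """
    wav = torch.randn(1, 32_000) * 0.1
    out_path = audio_write(Path(output_dir) / 'example', wav, sample_rate=32_000,
                           format='wav', strategy='peak')
    assert out_path.suffix == '.wav'
    return out_path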
| audiocraft-main | audiocraft/data/audio.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Dataset of audio with a simple description.
"""
from dataclasses import dataclass, fields, replace
import json
from pathlib import Path
import random
import typing as tp
import numpy as np
import torch
from .info_audio_dataset import (
InfoAudioDataset,
get_keyword_or_keyword_list
)
from ..modules.conditioners import (
ConditioningAttributes,
SegmentWithAttributes,
WavCondition,
)
EPS = torch.finfo(torch.float32).eps
TARGET_LEVEL_LOWER = -35
TARGET_LEVEL_UPPER = -15
@dataclass
class SoundInfo(SegmentWithAttributes):
"""Segment info augmented with Sound metadata.
"""
description: tp.Optional[str] = None
self_wav: tp.Optional[torch.Tensor] = None
@property
def has_sound_meta(self) -> bool:
return self.description is not None
def to_condition_attributes(self) -> ConditioningAttributes:
out = ConditioningAttributes()
for _field in fields(self):
key, value = _field.name, getattr(self, _field.name)
if key == 'self_wav':
out.wav[key] = value
else:
out.text[key] = value
return out
@staticmethod
def attribute_getter(attribute):
if attribute == 'description':
preprocess_func = get_keyword_or_keyword_list
else:
preprocess_func = None
return preprocess_func
@classmethod
def from_dict(cls, dictionary: dict, fields_required: bool = False):
_dictionary: tp.Dict[str, tp.Any] = {}
# allow a subset of attributes to not be loaded from the dictionary
# these attributes may be populated later
post_init_attributes = ['self_wav']
for _field in fields(cls):
if _field.name in post_init_attributes:
continue
elif _field.name not in dictionary:
if fields_required:
raise KeyError(f"Unexpected missing key: {_field.name}")
else:
preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name)
value = dictionary[_field.name]
if preprocess_func:
value = preprocess_func(value)
_dictionary[_field.name] = value
return cls(**_dictionary)
class SoundDataset(InfoAudioDataset):
"""Sound audio dataset: Audio dataset with environmental sound-specific metadata.
Args:
info_fields_required (bool): Whether all the mandatory metadata fields should be in the loaded metadata.
external_metadata_source (tp.Optional[str]): Folder containing JSON metadata for the corresponding dataset.
The metadata files contained in this folder are expected to match the stem of the audio file with
a json extension.
aug_p (float): Probability of performing audio mixing augmentation on the batch.
mix_p (float): Proportion of batch items that are mixed together when applying audio mixing augmentation.
mix_snr_low (int): Lowerbound for SNR value sampled for mixing augmentation.
mix_snr_high (int): Upperbound for SNR value sampled for mixing augmentation.
mix_min_overlap (float): Minimum overlap between audio files when performing mixing augmentation.
kwargs: Additional arguments for AudioDataset.
See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments.
"""
def __init__(
self,
*args,
info_fields_required: bool = True,
external_metadata_source: tp.Optional[str] = None,
aug_p: float = 0.,
mix_p: float = 0.,
mix_snr_low: int = -5,
mix_snr_high: int = 5,
mix_min_overlap: float = 0.5,
**kwargs
):
kwargs['return_info'] = True # We require the info for each song of the dataset.
super().__init__(*args, **kwargs)
self.info_fields_required = info_fields_required
self.external_metadata_source = external_metadata_source
self.aug_p = aug_p
self.mix_p = mix_p
if self.aug_p > 0:
assert self.mix_p > 0, "Expecting some mixing proportion mix_p if aug_p > 0"
assert self.channels == 1, "SoundDataset with audio mixing considers only monophonic audio"
self.mix_snr_low = mix_snr_low
self.mix_snr_high = mix_snr_high
self.mix_min_overlap = mix_min_overlap
def _get_info_path(self, path: tp.Union[str, Path]) -> Path:
"""Get path of JSON with metadata (description, etc.).
If there exists a JSON with the same name as 'path.name', then it will be used.
Else, such JSON will be searched for in an external json source folder if it exists.
"""
info_path = Path(path).with_suffix('.json')
if Path(info_path).exists():
return info_path
elif self.external_metadata_source and (Path(self.external_metadata_source) / info_path.name).exists():
return Path(self.external_metadata_source) / info_path.name
else:
raise Exception(f"Unable to find a metadata JSON for path: {path}")
def __getitem__(self, index):
wav, info = super().__getitem__(index)
info_data = info.to_dict()
info_path = self._get_info_path(info.meta.path)
if Path(info_path).exists():
with open(info_path, 'r') as json_file:
sound_data = json.load(json_file)
sound_data.update(info_data)
sound_info = SoundInfo.from_dict(sound_data, fields_required=self.info_fields_required)
# if there are multiple descriptions, sample one randomly
if isinstance(sound_info.description, list):
sound_info.description = random.choice(sound_info.description)
else:
sound_info = SoundInfo.from_dict(info_data, fields_required=False)
sound_info.self_wav = WavCondition(
wav=wav[None], length=torch.tensor([info.n_frames]),
sample_rate=[sound_info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
return wav, sound_info
def collater(self, samples):
# when training, audio mixing is performed in the collate function
wav, sound_info = super().collater(samples) # SoundDataset always returns infos
if self.aug_p > 0:
wav, sound_info = mix_samples(wav, sound_info, self.aug_p, self.mix_p,
snr_low=self.mix_snr_low, snr_high=self.mix_snr_high,
min_overlap=self.mix_min_overlap)
return wav, sound_info
def rms_f(x: torch.Tensor) -> torch.Tensor:
return (x ** 2).mean(1).pow(0.5)
def normalize(audio: torch.Tensor, target_level: int = -25) -> torch.Tensor:
"""Normalize the signal to the target level."""
rms = rms_f(audio)
scalar = 10 ** (target_level / 20) / (rms + EPS)
audio = audio * scalar.unsqueeze(1)
return audio
def is_clipped(audio: torch.Tensor, clipping_threshold: float = 0.99) -> torch.Tensor:
return (abs(audio) > clipping_threshold).any(1)
def mix_pair(src: torch.Tensor, dst: torch.Tensor, min_overlap: float) -> torch.Tensor:
start = random.randint(0, int(src.shape[1] * (1 - min_overlap)))
remainder = src.shape[1] - start
if dst.shape[1] > remainder:
src[:, start:] = src[:, start:] + dst[:, :remainder]
else:
src[:, start:start+dst.shape[1]] = src[:, start:start+dst.shape[1]] + dst
return src
def snr_mixer(clean: torch.Tensor, noise: torch.Tensor, snr: int, min_overlap: float,
target_level: int = -25, clipping_threshold: float = 0.99) -> torch.Tensor:
"""Function to mix clean speech and noise at various SNR levels.
Args:
clean (torch.Tensor): Clean audio source to mix, of shape [B, T].
noise (torch.Tensor): Noise audio source to mix, of shape [B, T].
snr (int): SNR level when mixing.
min_overlap (float): Minimum overlap between the two mixed sources.
target_level (int): Gain level in dB.
clipping_threshold (float): Threshold for clipping the audio.
Returns:
torch.Tensor: The mixed audio, of shape [B, T].
"""
if clean.shape[1] > noise.shape[1]:
noise = torch.nn.functional.pad(noise, (0, clean.shape[1] - noise.shape[1]))
else:
noise = noise[:, :clean.shape[1]]
# normalizing to -25 dB FS
clean = clean / (clean.max(1)[0].abs().unsqueeze(1) + EPS)
clean = normalize(clean, target_level)
rmsclean = rms_f(clean)
noise = noise / (noise.max(1)[0].abs().unsqueeze(1) + EPS)
noise = normalize(noise, target_level)
rmsnoise = rms_f(noise)
# set the noise level for a given SNR
noisescalar = (rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)).unsqueeze(1)
noisenewlevel = noise * noisescalar
# mix noise and clean speech
noisyspeech = mix_pair(clean, noisenewlevel, min_overlap)
# randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value
    # there is a chance of clipping that might happen with very low probability, which is not a major issue.
noisy_rms_level = np.random.randint(TARGET_LEVEL_LOWER, TARGET_LEVEL_UPPER)
rmsnoisy = rms_f(noisyspeech)
scalarnoisy = (10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)).unsqueeze(1)
noisyspeech = noisyspeech * scalarnoisy
clean = clean * scalarnoisy
noisenewlevel = noisenewlevel * scalarnoisy
# final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
clipped = is_clipped(noisyspeech)
if clipped.any():
noisyspeech_maxamplevel = noisyspeech[clipped].max(1)[0].abs().unsqueeze(1) / (clipping_threshold - EPS)
noisyspeech[clipped] = noisyspeech[clipped] / noisyspeech_maxamplevel
return noisyspeech
def snr_mix(src: torch.Tensor, dst: torch.Tensor, snr_low: int, snr_high: int, min_overlap: float):
if snr_low == snr_high:
snr = snr_low
else:
snr = np.random.randint(snr_low, snr_high)
mix = snr_mixer(src, dst, snr, min_overlap)
return mix
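def _example_snr_mix():
    """Illustrative sketch only (not part of the original module): mixes two mono batches
    at a fixed 0 dB SNR with full overlap, mirroring what the collater above does when
    audio mixing augmentation is enabled.
    """
    src = torch.randn(2, 16_000) * 0.1
    dst = torch.randn(2, 16_000) * 0.1
    mixed = snr_mix(src, dst, snr_low=0, snr_high=0, min_overlap=1.0)
    assert mixed.shape == src.shape
    return mixed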
def mix_text(src_text: str, dst_text: str):
"""Mix text from different sources by concatenating them."""
if src_text == dst_text:
return src_text
return src_text + " " + dst_text
def mix_samples(wavs: torch.Tensor, infos: tp.List[SoundInfo], aug_p: float, mix_p: float,
snr_low: int, snr_high: int, min_overlap: float):
"""Mix samples within a batch, summing the waveforms and concatenating the text infos.
Args:
wavs (torch.Tensor): Audio tensors of shape [B, C, T].
infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio.
aug_p (float): Augmentation probability.
mix_p (float): Proportion of items in the batch to mix (and merge) together.
snr_low (int): Lowerbound for sampling SNR.
snr_high (int): Upperbound for sampling SNR.
min_overlap (float): Minimum overlap between mixed samples.
Returns:
tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs
and mixed SoundInfo for the given batch.
"""
# no mixing to perform within the batch
if mix_p == 0:
return wavs, infos
if random.uniform(0, 1) < aug_p:
# perform all augmentations on waveforms as [B, T]
# randomly picking pairs of audio to mix
assert wavs.size(1) == 1, f"Mix samples requires monophonic audio but C={wavs.size(1)}"
wavs = wavs.mean(dim=1, keepdim=False)
B, T = wavs.shape
k = int(mix_p * B)
mixed_sources_idx = torch.randperm(B)[:k]
mixed_targets_idx = torch.randperm(B)[:k]
aug_wavs = snr_mix(
wavs[mixed_sources_idx],
wavs[mixed_targets_idx],
snr_low,
snr_high,
min_overlap,
)
# mixing textual descriptions in metadata
descriptions = [info.description for info in infos]
aug_infos = []
for i, j in zip(mixed_sources_idx, mixed_targets_idx):
text = mix_text(descriptions[i], descriptions[j])
m = replace(infos[i])
m.description = text
aug_infos.append(m)
# back to [B, C, T]
aug_wavs = aug_wavs.unsqueeze(1)
assert aug_wavs.shape[0] > 0, "Samples mixing returned empty batch."
assert aug_wavs.dim() == 3, f"Returned wav should be [B, C, T] but dim = {aug_wavs.dim()}"
assert aug_wavs.shape[0] == len(aug_infos), "Mismatch between number of wavs and infos in the batch"
return aug_wavs, aug_infos # [B, C, T]
else:
# randomly pick samples in the batch to match
# the batch size when performing audio mixing
B, C, T = wavs.shape
k = int(mix_p * B)
wav_idx = torch.randperm(B)[:k]
wavs = wavs[wav_idx]
infos = [infos[i] for i in wav_idx]
assert wavs.shape[0] == len(infos), "Mismatch between number of wavs and infos in the batch"
return wavs, infos # [B, C, T]
| audiocraft-main | audiocraft/data/sound_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | tests/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import typing as tp
import torch
import torchaudio
def get_white_noise(chs: int = 1, num_frames: int = 1):
wav = torch.randn(chs, num_frames)
return wav
def get_batch_white_noise(bs: int = 1, chs: int = 1, num_frames: int = 1):
wav = torch.randn(bs, chs, num_frames)
return wav
def save_wav(path: str, wav: torch.Tensor, sample_rate: int):
fp = Path(path)
kwargs: tp.Dict[str, tp.Any] = {}
if fp.suffix == '.wav':
kwargs['encoding'] = 'PCM_S'
kwargs['bits_per_sample'] = 16
elif fp.suffix == '.mp3':
kwargs['compression'] = 320
torchaudio.save(str(fp), wav, sample_rate, **kwargs)
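def _example_save_noise(path: str):
    """Illustrative sketch only (not part of the original module): writes one second of
    white noise to `path`, which is assumed to end in '.wav' or '.mp3'.
    """
    wav = get_white_noise(chs=1, num_frames=16_000) * 0.1
    save_wav(path, wav, 16_000)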
| audiocraft-main | tests/common_utils/wav_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa
from .temp_utils import TempDirMixin
from .wav_utils import get_batch_white_noise, get_white_noise, save_wav
| audiocraft-main | tests/common_utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
class TempDirMixin:
"""Mixin to provide easy access to temp dir.
"""
temp_dir_ = None
@classmethod
def get_base_temp_dir(cls):
# If AUDIOCRAFT_TEST_DIR is set, use it instead of temporary directory.
# this is handy for debugging.
key = "AUDIOCRAFT_TEST_DIR"
if key in os.environ:
return os.environ[key]
if cls.temp_dir_ is None:
cls.temp_dir_ = tempfile.TemporaryDirectory()
return cls.temp_dir_.name
@classmethod
def tearDownClass(cls):
if cls.temp_dir_ is not None:
try:
cls.temp_dir_.cleanup()
cls.temp_dir_ = None
except PermissionError:
                # On Windows there is a known issue with `shutil.rmtree`,
# which fails intermittently.
# https://github.com/python/cpython/issues/74168
# Following the above thread, we ignore it.
pass
super().tearDownClass()
@property
def id(self):
return self.__class__.__name__
def get_temp_path(self, *paths):
temp_dir = os.path.join(self.get_base_temp_dir(), self.id)
path = os.path.join(temp_dir, *paths)
os.makedirs(os.path.dirname(path), exist_ok=True)
return path
def get_temp_dir(self, *paths):
temp_dir = os.path.join(self.get_base_temp_dir(), self.id)
path = os.path.join(temp_dir, *paths)
os.makedirs(path, exist_ok=True)
return path
| audiocraft-main | tests/common_utils/temp_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import torch
from audiocraft.losses import (
MelSpectrogramL1Loss,
MultiScaleMelSpectrogramLoss,
MRSTFTLoss,
SISNR,
STFTLoss,
)
def test_mel_l1_loss():
N, C, T = 2, 2, random.randrange(1000, 100_000)
t1 = torch.randn(N, C, T)
t2 = torch.randn(N, C, T)
mel_l1 = MelSpectrogramL1Loss(sample_rate=22_050)
loss = mel_l1(t1, t2)
loss_same = mel_l1(t1, t1)
assert isinstance(loss, torch.Tensor)
assert isinstance(loss_same, torch.Tensor)
assert loss_same.item() == 0.0
def test_msspec_loss():
N, C, T = 2, 2, random.randrange(1000, 100_000)
t1 = torch.randn(N, C, T)
t2 = torch.randn(N, C, T)
msspec = MultiScaleMelSpectrogramLoss(sample_rate=22_050)
loss = msspec(t1, t2)
loss_same = msspec(t1, t1)
assert isinstance(loss, torch.Tensor)
assert isinstance(loss_same, torch.Tensor)
assert loss_same.item() == 0.0
def test_mrstft_loss():
N, C, T = 2, 2, random.randrange(1000, 100_000)
t1 = torch.randn(N, C, T)
t2 = torch.randn(N, C, T)
mrstft = MRSTFTLoss()
loss = mrstft(t1, t2)
assert isinstance(loss, torch.Tensor)
def test_sisnr_loss():
N, C, T = 2, 2, random.randrange(1000, 100_000)
t1 = torch.randn(N, C, T)
t2 = torch.randn(N, C, T)
sisnr = SISNR()
loss = sisnr(t1, t2)
assert isinstance(loss, torch.Tensor)
def test_stft_loss():
N, C, T = 2, 2, random.randrange(1000, 100_000)
t1 = torch.randn(N, C, T)
t2 = torch.randn(N, C, T)
mrstft = STFTLoss()
loss = mrstft(t1, t2)
assert isinstance(loss, torch.Tensor)
| audiocraft-main | tests/losses/test_losses.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | tests/losses/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import random
import torch
from audiocraft.adversarial import (
AdversarialLoss,
get_adv_criterion,
get_real_criterion,
get_fake_criterion,
FeatureMatchingLoss,
MultiScaleDiscriminator,
)
class TestAdversarialLoss:
def test_adversarial_single_multidiscriminator(self):
adv = MultiScaleDiscriminator()
optimizer = torch.optim.Adam(
adv.parameters(),
lr=1e-4,
)
loss, loss_real, loss_fake = get_adv_criterion('mse'), get_real_criterion('mse'), get_fake_criterion('mse')
adv_loss = AdversarialLoss(adv, optimizer, loss, loss_real, loss_fake)
B, C, T = 4, 1, random.randint(1000, 5000)
real = torch.randn(B, C, T)
fake = torch.randn(B, C, T)
disc_loss = adv_loss.train_adv(fake, real)
assert isinstance(disc_loss, torch.Tensor) and isinstance(disc_loss.item(), float)
loss, loss_feat = adv_loss(fake, real)
assert isinstance(loss, torch.Tensor) and isinstance(loss.item(), float)
# we did not specify feature loss
assert loss_feat.item() == 0.
def test_adversarial_feat_loss(self):
adv = MultiScaleDiscriminator()
optimizer = torch.optim.Adam(
adv.parameters(),
lr=1e-4,
)
loss, loss_real, loss_fake = get_adv_criterion('mse'), get_real_criterion('mse'), get_fake_criterion('mse')
feat_loss = FeatureMatchingLoss()
adv_loss = AdversarialLoss(adv, optimizer, loss, loss_real, loss_fake, feat_loss)
B, C, T = 4, 1, random.randint(1000, 5000)
real = torch.randn(B, C, T)
fake = torch.randn(B, C, T)
loss, loss_feat = adv_loss(fake, real)
assert isinstance(loss, torch.Tensor) and isinstance(loss.item(), float)
        assert isinstance(loss_feat, torch.Tensor) and isinstance(loss_feat.item(), float)
class TestGeneratorAdversarialLoss:
def test_hinge_generator_adv_loss(self):
adv_loss = get_adv_criterion(loss_type='hinge')
t0 = torch.randn(1, 2, 0)
t1 = torch.FloatTensor([1.0, 2.0, 3.0])
assert adv_loss(t0).item() == 0.0
assert adv_loss(t1).item() == -2.0
def test_mse_generator_adv_loss(self):
adv_loss = get_adv_criterion(loss_type='mse')
t0 = torch.randn(1, 2, 0)
t1 = torch.FloatTensor([1.0, 1.0, 1.0])
t2 = torch.FloatTensor([2.0, 5.0, 5.0])
assert adv_loss(t0).item() == 0.0
assert adv_loss(t1).item() == 0.0
assert adv_loss(t2).item() == 11.0
class TestDiscriminatorAdversarialLoss:
def _disc_loss(self, loss_type: str, fake: torch.Tensor, real: torch.Tensor):
disc_loss_real = get_real_criterion(loss_type)
disc_loss_fake = get_fake_criterion(loss_type)
loss = disc_loss_fake(fake) + disc_loss_real(real)
return loss
def test_hinge_discriminator_adv_loss(self):
loss_type = 'hinge'
t0 = torch.FloatTensor([0.0, 0.0, 0.0])
t1 = torch.FloatTensor([1.0, 2.0, 3.0])
assert self._disc_loss(loss_type, t0, t0).item() == 2.0
assert self._disc_loss(loss_type, t1, t1).item() == 3.0
def test_mse_discriminator_adv_loss(self):
loss_type = 'mse'
t0 = torch.FloatTensor([0.0, 0.0, 0.0])
t1 = torch.FloatTensor([1.0, 1.0, 1.0])
assert self._disc_loss(loss_type, t0, t0).item() == 1.0
assert self._disc_loss(loss_type, t1, t0).item() == 2.0
class TestFeatureMatchingLoss:
def test_features_matching_loss_base(self):
ft_matching_loss = FeatureMatchingLoss()
length = random.randrange(1, 100_000)
t1 = torch.randn(1, 2, length)
loss = ft_matching_loss([t1], [t1])
assert isinstance(loss, torch.Tensor)
assert loss.item() == 0.0
def test_features_matching_loss_raises_exception(self):
ft_matching_loss = FeatureMatchingLoss()
length = random.randrange(1, 100_000)
t1 = torch.randn(1, 2, length)
t2 = torch.randn(1, 2, length + 1)
with pytest.raises(AssertionError):
ft_matching_loss([], [])
with pytest.raises(AssertionError):
ft_matching_loss([t1], [t1, t1])
with pytest.raises(AssertionError):
ft_matching_loss([t1], [t2])
def test_features_matching_loss_output(self):
loss_nonorm = FeatureMatchingLoss(normalize=False)
loss_layer_normed = FeatureMatchingLoss(normalize=True)
length = random.randrange(1, 100_000)
t1 = torch.randn(1, 2, length)
t2 = torch.randn(1, 2, length)
assert loss_nonorm([t1, t2], [t1, t2]).item() == 0.0
assert loss_layer_normed([t1, t2], [t1, t2]).item() == 0.0
t3 = torch.FloatTensor([1.0, 2.0, 3.0])
t4 = torch.FloatTensor([2.0, 10.0, 3.0])
assert loss_nonorm([t3], [t4]).item() == 3.0
assert loss_nonorm([t3, t3], [t4, t4]).item() == 6.0
assert loss_layer_normed([t3], [t4]).item() == 3.0
assert loss_layer_normed([t3, t3], [t4, t4]).item() == 3.0
| audiocraft-main | tests/adversarial/test_losses.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | tests/adversarial/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import torch
from audiocraft.adversarial.discriminators import (
MultiPeriodDiscriminator,
MultiScaleDiscriminator,
MultiScaleSTFTDiscriminator
)
class TestMultiPeriodDiscriminator:
def test_mpd_discriminator(self):
N, C, T = 2, 2, random.randrange(1, 100_000)
t0 = torch.randn(N, C, T)
periods = [1, 2, 3]
mpd = MultiPeriodDiscriminator(periods=periods, in_channels=C)
logits, fmaps = mpd(t0)
assert len(logits) == len(periods)
assert len(fmaps) == len(periods)
assert all([logit.shape[0] == N and len(logit.shape) == 4 for logit in logits])
assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])
class TestMultiScaleDiscriminator:
def test_msd_discriminator(self):
N, C, T = 2, 2, random.randrange(1, 100_000)
t0 = torch.randn(N, C, T)
scale_norms = ['weight_norm', 'weight_norm']
msd = MultiScaleDiscriminator(scale_norms=scale_norms, in_channels=C)
logits, fmaps = msd(t0)
assert len(logits) == len(scale_norms)
assert len(fmaps) == len(scale_norms)
assert all([logit.shape[0] == N and len(logit.shape) == 3 for logit in logits])
assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])
class TestMultiScaleStftDiscriminator:
def test_msstftd_discriminator(self):
N, C, T = 2, 2, random.randrange(1, 100_000)
t0 = torch.randn(N, C, T)
n_filters = 4
n_ffts = [128, 256, 64]
hop_lengths = [32, 64, 16]
win_lengths = [128, 256, 64]
msstftd = MultiScaleSTFTDiscriminator(filters=n_filters, n_ffts=n_ffts, hop_lengths=hop_lengths,
win_lengths=win_lengths, in_channels=C)
logits, fmaps = msstftd(t0)
assert len(logits) == len(n_ffts)
assert len(fmaps) == len(n_ffts)
assert all([logit.shape[0] == N and len(logit.shape) == 4 for logit in logits])
assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])
| audiocraft-main | tests/adversarial/test_discriminators.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | tests/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from audiocraft.models import MusicGen
class TestMusicGenModel:
def get_musicgen(self):
mg = MusicGen.get_pretrained(name='debug', device='cpu')
mg.set_generation_params(duration=2.0, extend_stride=2.)
return mg
def test_base(self):
mg = self.get_musicgen()
assert mg.frame_rate == 25
assert mg.sample_rate == 32000
assert mg.audio_channels == 1
def test_generate_unconditional(self):
mg = self.get_musicgen()
wav = mg.generate_unconditional(3)
assert list(wav.shape) == [3, 1, 64000]
def test_generate_continuation(self):
mg = self.get_musicgen()
prompt = torch.randn(3, 1, 32000)
wav = mg.generate_continuation(prompt, 32000)
assert list(wav.shape) == [3, 1, 64000]
prompt = torch.randn(2, 1, 32000)
wav = mg.generate_continuation(
prompt, 32000, ['youpi', 'lapin dort'])
assert list(wav.shape) == [2, 1, 64000]
prompt = torch.randn(2, 1, 32000)
with pytest.raises(AssertionError):
wav = mg.generate_continuation(
prompt, 32000, ['youpi', 'lapin dort', 'one too many'])
def test_generate(self):
mg = self.get_musicgen()
wav = mg.generate(
['youpi', 'lapin dort'])
assert list(wav.shape) == [2, 1, 64000]
def test_generate_long(self):
mg = self.get_musicgen()
mg.max_duration = 3.
mg.set_generation_params(duration=4., extend_stride=2.)
wav = mg.generate(
['youpi', 'lapin dort'])
assert list(wav.shape) == [2, 1, 32000 * 4]
| audiocraft-main | tests/models/test_musicgen.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import numpy as np
import torch
from audiocraft.models import EncodecModel
from audiocraft.modules import SEANetEncoder, SEANetDecoder
from audiocraft.quantization import DummyQuantizer
class TestEncodecModel:
def _create_encodec_model(self,
sample_rate: int,
channels: int,
dim: int = 5,
n_filters: int = 3,
n_residual_layers: int = 1,
ratios: list = [5, 4, 3, 2],
**kwargs):
frame_rate = np.prod(ratios)
encoder = SEANetEncoder(channels=channels, dimension=dim, n_filters=n_filters,
n_residual_layers=n_residual_layers, ratios=ratios)
decoder = SEANetDecoder(channels=channels, dimension=dim, n_filters=n_filters,
n_residual_layers=n_residual_layers, ratios=ratios)
quantizer = DummyQuantizer()
model = EncodecModel(encoder, decoder, quantizer, frame_rate=frame_rate,
sample_rate=sample_rate, channels=channels, **kwargs)
return model
def test_model(self):
random.seed(1234)
sample_rate = 24_000
channels = 1
model = self._create_encodec_model(sample_rate, channels)
for _ in range(10):
length = random.randrange(1, 10_000)
x = torch.randn(2, channels, length)
res = model(x)
assert res.x.shape == x.shape
def test_model_renorm(self):
random.seed(1234)
sample_rate = 24_000
channels = 1
model_nonorm = self._create_encodec_model(sample_rate, channels, renormalize=False)
model_renorm = self._create_encodec_model(sample_rate, channels, renormalize=True)
for _ in range(10):
length = random.randrange(1, 10_000)
x = torch.randn(2, channels, length)
codes, scales = model_nonorm.encode(x)
codes, scales = model_renorm.encode(x)
assert scales is not None
| audiocraft-main | tests/models/test_encodec_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import numpy as np
import torch
from audiocraft.models.multibanddiffusion import MultiBandDiffusion, DiffusionProcess
from audiocraft.models import EncodecModel, DiffusionUnet
from audiocraft.modules import SEANetEncoder, SEANetDecoder
from audiocraft.modules.diffusion_schedule import NoiseSchedule
from audiocraft.quantization import DummyQuantizer
class TestMBD:
def _create_mbd(self,
sample_rate: int,
channels: int,
n_filters: int = 3,
n_residual_layers: int = 1,
ratios: list = [5, 4, 3, 2],
num_steps: int = 1000,
codec_dim: int = 128,
**kwargs):
frame_rate = np.prod(ratios)
encoder = SEANetEncoder(channels=channels, dimension=codec_dim, n_filters=n_filters,
n_residual_layers=n_residual_layers, ratios=ratios)
decoder = SEANetDecoder(channels=channels, dimension=codec_dim, n_filters=n_filters,
n_residual_layers=n_residual_layers, ratios=ratios)
quantizer = DummyQuantizer()
compression_model = EncodecModel(encoder, decoder, quantizer, frame_rate=frame_rate,
sample_rate=sample_rate, channels=channels, **kwargs)
diffusion_model = DiffusionUnet(chin=channels, num_steps=num_steps, codec_dim=codec_dim)
schedule = NoiseSchedule(device='cpu', num_steps=num_steps)
DP = DiffusionProcess(model=diffusion_model, noise_schedule=schedule)
mbd = MultiBandDiffusion(DPs=[DP], codec_model=compression_model)
return mbd
def test_model(self):
random.seed(1234)
sample_rate = 24_000
channels = 1
codec_dim = 128
mbd = self._create_mbd(sample_rate=sample_rate, channels=channels, codec_dim=codec_dim)
for _ in range(10):
length = random.randrange(1, 10_000)
x = torch.randn(2, channels, length)
res = mbd.regenerate(x, sample_rate)
assert res.shape == x.shape
| audiocraft-main | tests/models/test_multibanddiffusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from audiocraft.models import AudioGen
class TestAudioGenModel:
def get_audiogen(self):
ag = AudioGen.get_pretrained(name='debug', device='cpu')
ag.set_generation_params(duration=2.0, extend_stride=2.)
return ag
def test_base(self):
ag = self.get_audiogen()
assert ag.frame_rate == 25
assert ag.sample_rate == 16000
assert ag.audio_channels == 1
def test_generate_continuation(self):
ag = self.get_audiogen()
prompt = torch.randn(3, 1, 16000)
wav = ag.generate_continuation(prompt, 16000)
assert list(wav.shape) == [3, 1, 32000]
prompt = torch.randn(2, 1, 16000)
wav = ag.generate_continuation(
prompt, 16000, ['youpi', 'lapin dort'])
assert list(wav.shape) == [2, 1, 32000]
prompt = torch.randn(2, 1, 16000)
with pytest.raises(AssertionError):
wav = ag.generate_continuation(
prompt, 16000, ['youpi', 'lapin dort', 'one too many'])
def test_generate(self):
ag = self.get_audiogen()
wav = ag.generate(
['youpi', 'lapin dort'])
assert list(wav.shape) == [2, 1, 32000]
def test_generate_long(self):
ag = self.get_audiogen()
ag.max_duration = 3.
ag.set_generation_params(duration=4., extend_stride=2.)
wav = ag.generate(
['youpi', 'lapin dort'])
assert list(wav.shape) == [2, 1, 16000 * 4]
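# Illustrative note (not part of the original tests): with duration=2.0 on a 16 kHz
# model, generate_continuation is assumed to return the full 2 s window, prompt
# included, which is why the asserts above expect 2 * 16000 = 32000 samples.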
| audiocraft-main | tests/models/test_audiogen.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from audiocraft.quantization.vq import ResidualVectorQuantizer
class TestResidualVectorQuantizer:
def test_rvq(self):
x = torch.randn(1, 16, 2048)
vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8)
res = vq(x, 1.)
assert res.x.shape == torch.Size([1, 16, 2048])
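# Illustrative sketch (not part of the original test): the quantizer also exposes
# encode/decode helpers. Shapes below are assumptions based on the forward pass
# above (codes laid out as [B, n_q, T]); adjust if the actual API differs.
def _rvq_encode_decode_example():
    x = torch.randn(1, 16, 2048)
    vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8)
    codes = vq.encode(x)
    assert list(codes.shape) == [1, 8, 2048]
    y = vq.decode(codes)
    assert y.shape == x.shape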
| audiocraft-main | tests/quantization/test_vq.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from audiocraft.modules.activations import CustomGLU
class TestActivations:
def test_custom_glu_calculation(self):
activation = CustomGLU(nn.Identity())
initial_shape = (4, 8, 8)
part_a = torch.ones(initial_shape) * 2
part_b = torch.ones(initial_shape) * -1
input = torch.cat((part_a, part_b), dim=-1)
output = activation(input)
# ensure all dimensions match initial shape
assert output.shape == initial_shape
# ensure the gating was calculated correctly a * f(b)
assert torch.all(output == -2).item()
| audiocraft-main | tests/modules/test_activations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | tests/modules/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from audiocraft.modules.rope import RotaryEmbedding
from audiocraft.modules.transformer import StreamingTransformer, set_efficient_attention_backend
def test_rope():
set_efficient_attention_backend('xformers')
B, T, H, C = 8, 75, 16, 128
rope = RotaryEmbedding(dim=C)
xq = torch.rand((B, T, H, C))
xk = torch.rand((B, T, H, C))
xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
assert list(xq_out.shape) == [B, T, H, C]
assert list(xk_out.shape) == [B, T, H, C]
def test_rope_io_dtypes():
set_efficient_attention_backend('xformers')
B, T, H, C = 8, 75, 16, 128
rope_32 = RotaryEmbedding(dim=C, dtype=torch.float32)
rope_64 = RotaryEmbedding(dim=C, dtype=torch.float64)
# Test bfloat16 inputs w/ both 32 and 64 precision rope.
xq_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
xk_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
xq_out, xk_out = rope_32.rotate_qk(xq_16, xk_16)
assert xq_out.dtype == torch.bfloat16
xq_out, xk_out = rope_64.rotate_qk(xq_16, xk_16)
assert xq_out.dtype == torch.bfloat16
# Test float32 inputs w/ both 32 and 64 precision rope.
xq_32 = torch.rand((B, T, H, C)).to(torch.float32)
xk_32 = torch.rand((B, T, H, C)).to(torch.float32)
xq_out, xk_out = rope_32.rotate_qk(xq_32, xk_32)
assert xq_out.dtype == torch.float32
xq_out, xk_out = rope_64.rotate_qk(xq_32, xk_32)
assert xq_out.dtype == torch.float32
def test_transformer_with_rope():
set_efficient_attention_backend('xformers')
torch.manual_seed(1234)
for pos in ['rope', 'sin_rope']:
tr = StreamingTransformer(
16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
positional_embedding=pos)
tr.eval()
steps = 12
x = torch.randn(3, steps, 16)
out = tr(x)
assert list(out.shape) == list(x.shape)
@torch.no_grad()
def test_rope_streaming():
set_efficient_attention_backend('xformers')
torch.manual_seed(1234)
tr = StreamingTransformer(
16, 4, 2, causal=True, dropout=0.,
custom=True, positional_embedding='rope')
tr.eval()
steps = 12
x = torch.randn(3, steps, 16)
ref = tr(x)
with tr.streaming():
outs = []
frame_sizes = [1] * steps
for frame_size in frame_sizes:
frame = x[:, :frame_size]
x = x[:, frame_size:]
outs.append(tr(frame))
out = torch.cat(outs, dim=1)
assert list(out.shape) == [3, steps, 16]
delta = torch.norm(out - ref) / torch.norm(out)
assert delta < 1e-6, delta
@torch.no_grad()
def test_rope_streaming_past_context():
set_efficient_attention_backend('xformers')
torch.manual_seed(1234)
for context in [None, 10]:
tr = StreamingTransformer(
16, 4, 1 if context else 2,
causal=True, past_context=context, custom=True,
dropout=0., positional_embedding='rope')
tr.eval()
steps = 20
x = torch.randn(3, steps, 16)
ref = tr(x)
with tr.streaming():
outs = []
frame_sizes = [1] * steps
for frame_size in frame_sizes:
frame = x[:, :frame_size]
x = x[:, frame_size:]
outs.append(tr(frame))
out = torch.cat(outs, dim=1)
assert list(out.shape) == [3, steps, 16]
delta = torch.norm(out - ref) / torch.norm(out)
assert delta < 1e-6, delta
def test_rope_memory_efficient():
set_efficient_attention_backend('xformers')
torch.manual_seed(1234)
tr = StreamingTransformer(
16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
positional_embedding='rope')
tr_mem_efficient = StreamingTransformer(
16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1,
positional_embedding='rope')
tr_mem_efficient.load_state_dict(tr.state_dict())
tr.eval()
steps = 12
x = torch.randn(3, steps, 16)
with torch.no_grad():
y = tr(x)
y2 = tr_mem_efficient(x)
# Check at float precision b/c this is the rope default.
assert torch.allclose(y, y2, atol=1e-7), (y - y2).norm()
def test_rope_with_xpos():
set_efficient_attention_backend('xformers')
B, T, H, C = 8, 75, 16, 128
rope = RotaryEmbedding(dim=C, xpos=True)
xq = torch.rand((B, T, H, C))
xk = torch.rand((B, T, H, C))
xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
assert list(xq_out.shape) == [B, T, H, C]
assert list(xk_out.shape) == [B, T, H, C]
def test_positional_scale():
set_efficient_attention_backend('xformers')
B, T, H, C = 8, 75, 16, 128
rope = RotaryEmbedding(dim=C, xpos=True, scale=0.0)
xq = torch.rand((B, T, H, C))
xk = torch.rand((B, T, H, C))
xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
assert torch.allclose(xq, xq_out)
assert torch.allclose(xk, xk_out)
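# Illustrative sketch (not part of the original tests): a self-contained reference of
# the standard rotary-embedding rotation on [B, T, H, C] tensors, using the
# half-split (NeoX-style) layout. It is not guaranteed to match RotaryEmbedding
# numerically (it ignores xpos scaling and the streaming `start` offset).
def _reference_rope_rotation(x: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
    B, T, H, C = x.shape
    half = C // 2
    inv_freq = base ** (-torch.arange(half, dtype=torch.float32) / half)
    angles = torch.arange(T, dtype=torch.float32)[:, None] * inv_freq[None, :]  # [T, half]
    cos = angles.cos()[None, :, None, :]
    sin = angles.sin()[None, :, None, :]
    x1, x2 = x[..., :half], x[..., half:]
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)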
| audiocraft-main | tests/modules/test_rope.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from audiocraft.modules.codebooks_patterns import (
DelayedPatternProvider,
ParallelPatternProvider,
Pattern,
UnrolledPatternProvider,
)
class TestParallelPatternProvider:
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
def test_get_pattern(self, n_q: int, timesteps: int):
provider = ParallelPatternProvider(n_q)
pattern = provider.get_pattern(timesteps)
# + 1 to account for 1st step
assert len(pattern.layout) == timesteps + 1
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [8, 16, 100])
def test_pattern_content(self, n_q: int, timesteps: int):
provider = ParallelPatternProvider(n_q)
pattern = provider.get_pattern(timesteps)
for s, v in enumerate(pattern.layout):
for i, code in enumerate(v):
assert i == code.q
assert code.t == s - 1 # account for the 1st empty step
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [8, 16, 100])
def test_pattern_max_delay(self, n_q: int, timesteps: int):
provider = ParallelPatternProvider(n_q)
pattern = provider.get_pattern(timesteps)
assert pattern.max_delay == 0
assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
class TestDelayedPatternProvider:
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
def test_get_pattern(self, n_q: int, timesteps: int):
delays = [
list(range(n_q)),
[0] + [1] * (n_q - 1),
[0] + [4] * (n_q - 1),
]
for delay in delays:
provider = DelayedPatternProvider(n_q, delay)
pattern = provider.get_pattern(timesteps)
# + 1 to account for 1st step
assert len(pattern.layout) == timesteps + max(delay) + 1
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [8, 16, 100])
def test_pattern_content(self, n_q: int, timesteps: int):
provider = DelayedPatternProvider(n_q)
pattern = provider.get_pattern(timesteps)
for s, v in enumerate(pattern.layout):
for i, code in enumerate(v):
assert i == code.q
assert code.t == max(0, s - code.q - 1)
@pytest.mark.parametrize("timesteps", [8, 16, 100])
@pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]])
def test_pattern_max_delay(self, timesteps: int, delay: list):
provider = DelayedPatternProvider(len(delay), delay)
pattern = provider.get_pattern(timesteps)
assert pattern.max_delay == max(delay)
assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
class TestUnrolledPatternProvider:
@pytest.mark.parametrize("timesteps", [0, 1, 16])
@pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
@pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
def test_get_pattern(self, timesteps: int, flattening: list, delays: list):
n_q = len(flattening)
max_delay = max(delays)
provider = UnrolledPatternProvider(n_q, flattening, delays)
pattern = provider.get_pattern(timesteps)
assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay
@pytest.mark.parametrize("timesteps", [0, 1, 16])
@pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
@pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list):
n_q = len(flattening)
max_delay = max(delays)
provider = UnrolledPatternProvider(n_q, flattening, delays)
pattern = provider.get_pattern(timesteps)
assert pattern.max_delay == max_delay
class TestPattern:
def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
"""Reference method to build the sequence from the pattern without using fancy scatter."""
bs, n_q, T = z.shape
z = z.cpu().numpy()
assert n_q == pattern.n_q
assert T <= pattern.timesteps
inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy()
inp[:] = special_token
for s, v in enumerate(pattern.layout):
for (t, q) in v:
if t < T:
inp[:, q, s] = z[:, q, t]
return torch.from_numpy(inp)
def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
"""Reference method to revert the sequence from the pattern without using fancy scatter."""
z = z.cpu().numpy()
bs, n_q, S = z.shape
assert pattern.n_q == n_q
inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy()
inp[:] = special_token
for s, v in enumerate(pattern.layout):
for (t, q) in v:
if t < pattern.timesteps:
inp[:, q, t] = z[:, q, s]
return torch.from_numpy(inp)
def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float):
"""Reference method to revert the logits from the pattern without using fancy scatter."""
z = z.cpu().numpy()
bs, card, n_q, S = z.shape
assert pattern.n_q == n_q
ref_layout = pattern.layout
inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy()
inp[:] = special_token
for s, v in enumerate(ref_layout[1:]):
if s < S:
for (t, q) in v:
if t < pattern.timesteps:
inp[:, :, q, t] = z[:, :, q, s]
return torch.from_numpy(inp)
def _get_pattern_providers(self, n_q: int):
pattern_provider_1 = ParallelPatternProvider(n_q)
pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q)))
pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1))
pattern_provider_4 = UnrolledPatternProvider(
n_q, flattening=list(range(n_q)), delays=[0] * n_q
)
pattern_provider_5 = UnrolledPatternProvider(
n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q
)
pattern_provider_6 = UnrolledPatternProvider(
n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1)
)
return [
pattern_provider_1,
pattern_provider_2,
pattern_provider_3,
pattern_provider_4,
pattern_provider_5,
pattern_provider_6,
]
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [16, 72])
def test_build_pattern_sequence(self, n_q: int, timesteps: int):
bs = 2
card = 256
special_token = card
pattern_providers = self._get_pattern_providers(n_q)
for pattern_provider in pattern_providers:
pattern = pattern_provider.get_pattern(timesteps)
# we can correctly build the sequence from the pattern
z = torch.randint(0, card, (bs, n_q, timesteps))
ref_res = self.ref_build_pattern_sequence(z, pattern, special_token)
res, indexes, mask = pattern.build_pattern_sequence(z, special_token)
assert (res == ref_res).float().mean() == 1.0
            # assertions are expected to fail for invalid numbers of timesteps
invalid_timesteps = [timesteps + 1]
if pattern.num_sequence_steps != pattern.timesteps:
invalid_timesteps.append(pattern.num_sequence_steps)
for i_timesteps in invalid_timesteps:
z2 = torch.randint(0, card, (bs, n_q, i_timesteps))
with pytest.raises(AssertionError):
pattern.build_pattern_sequence(z2, special_token)
            # assertions are expected to fail for invalid numbers of codebooks
invalid_qs = [0, n_q - 1, n_q + 1]
for i_q in invalid_qs:
z3 = torch.randint(0, card, (bs, i_q, timesteps))
with pytest.raises(AssertionError):
pattern.build_pattern_sequence(z3, special_token)
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [16, 72])
def test_revert_pattern_sequence(self, n_q: int, timesteps: int):
bs = 2
card = 256
special_token = card
pattern_providers = self._get_pattern_providers(n_q)
for pattern_provider in pattern_providers:
pattern = pattern_provider.get_pattern(timesteps)
# this works assuming previous tests are successful
z = torch.randint(0, card, (bs, n_q, timesteps))
s = self.ref_build_pattern_sequence(z, pattern, special_token)
ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token)
            # ensure our reference script retrieves the original sequence
assert z.shape == ref_out.shape
assert (z == ref_out).float().mean() == 1.0
# now we can test the scatter version
out, indexes, mask = pattern.revert_pattern_sequence(s, special_token)
assert out.shape == ref_out.shape
assert (out == ref_out).float().mean() == 1.0
@pytest.mark.parametrize("n_q", [1, 4, 32])
@pytest.mark.parametrize("timesteps", [16, 72])
@pytest.mark.parametrize("card", [1, 2, 256, 1024])
def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int):
bs = 2
special_token = card
logits_special_token = float('nan')
pattern_providers = self._get_pattern_providers(n_q)
for pattern_provider in pattern_providers:
pattern = pattern_provider.get_pattern(timesteps)
# this works assuming previous tests are successful
z = torch.randint(0, card, (bs, n_q, timesteps))
s = self.ref_build_pattern_sequence(z, pattern, special_token)
logits = torch.randn((bs, card, n_q, s.shape[-1]))
ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token)
            # ensure our reference script retrieves the original sequence
assert ref_out.shape == torch.Size([bs, card, n_q, timesteps])
# now we can test the scatter version
out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token)
assert out.shape == ref_out.shape
assert (out == ref_out).float().mean() == 1.0
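# Illustrative sketch (not part of the original tests): printing a small delayed
# pattern makes the layout easier to read. Each sequence step holds (t, q)
# coordinates; with the default delays [0, 1, ..., n_q - 1], codebook q is shifted
# q steps to the right relative to codebook 0.
def _print_delayed_layout_example():
    provider = DelayedPatternProvider(n_q=4)
    pattern = provider.get_pattern(timesteps=6)
    for step, coords in enumerate(pattern.layout):
        print(step, [(coord.t, coord.q) for coord in coords])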
| audiocraft-main | tests/modules/test_codebooks_patterns.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import pytest
import torch
from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock
from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d
class TestSEANetModel:
def test_base(self):
encoder = SEANetEncoder()
decoder = SEANetDecoder()
x = torch.randn(1, 1, 24000)
z = encoder(x)
assert list(z.shape) == [1, 128, 75], z.shape
y = decoder(z)
assert y.shape == x.shape, (x.shape, y.shape)
def test_causal(self):
encoder = SEANetEncoder(causal=True)
decoder = SEANetDecoder(causal=True)
x = torch.randn(1, 1, 24000)
z = encoder(x)
assert list(z.shape) == [1, 128, 75], z.shape
y = decoder(z)
assert y.shape == x.shape, (x.shape, y.shape)
def test_conv_skip_connection(self):
encoder = SEANetEncoder(true_skip=False)
decoder = SEANetDecoder(true_skip=False)
x = torch.randn(1, 1, 24000)
z = encoder(x)
assert list(z.shape) == [1, 128, 75], z.shape
y = decoder(z)
assert y.shape == x.shape, (x.shape, y.shape)
def test_seanet_encoder_decoder_final_act(self):
encoder = SEANetEncoder(true_skip=False)
decoder = SEANetDecoder(true_skip=False, final_activation='Tanh')
x = torch.randn(1, 1, 24000)
z = encoder(x)
assert list(z.shape) == [1, 128, 75], z.shape
y = decoder(z)
assert y.shape == x.shape, (x.shape, y.shape)
def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str):
n_blocks = 0
for layer in encoder.model:
if isinstance(layer, StreamableConv1d):
n_blocks += 1
                assert layer.conv.norm_type == ('none' if n_blocks <= n_disable_blocks else norm)
elif isinstance(layer, SEANetResnetBlock):
for resnet_layer in layer.block:
if isinstance(resnet_layer, StreamableConv1d):
# here we add + 1 to n_blocks as we increment n_blocks just after the block
                        assert resnet_layer.conv.norm_type == ('none' if (n_blocks + 1) <= n_disable_blocks else norm)
def test_encoder_disable_norm(self):
n_residuals = [0, 1, 3]
disable_blocks = [0, 1, 2, 3, 4, 5, 6]
norms = ['weight_norm', 'none']
for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms):
encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm,
disable_norm_outer_blocks=disable_blocks)
self._check_encoder_blocks_norm(encoder, disable_blocks, norm)
def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str):
n_blocks = 0
for layer in decoder.model:
if isinstance(layer, StreamableConv1d):
n_blocks += 1
                assert layer.conv.norm_type == \
                    ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
elif isinstance(layer, StreamableConvTranspose1d):
n_blocks += 1
                assert layer.convtr.norm_type == \
                    ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
elif isinstance(layer, SEANetResnetBlock):
for resnet_layer in layer.block:
if isinstance(resnet_layer, StreamableConv1d):
                        assert resnet_layer.conv.norm_type == \
                            ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
def test_decoder_disable_norm(self):
n_residuals = [0, 1, 3]
disable_blocks = [0, 1, 2, 3, 4, 5, 6]
norms = ['weight_norm', 'none']
for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms):
decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm,
disable_norm_outer_blocks=disable_blocks)
self._check_decoder_blocks_norm(decoder, disable_blocks, norm)
def test_disable_norm_raises_exception(self):
# Invalid disable_norm_outer_blocks values raise exceptions
with pytest.raises(AssertionError):
SEANetEncoder(disable_norm_outer_blocks=-1)
with pytest.raises(AssertionError):
SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
with pytest.raises(AssertionError):
SEANetDecoder(disable_norm_outer_blocks=-1)
with pytest.raises(AssertionError):
SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
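# Illustrative note (not part of the original tests): with the default SEANet ratios
# (assumed to be [8, 5, 4, 2]) the total hop size is 8 * 5 * 4 * 2 = 320 samples, so
# the 24000-sample inputs above map to 24000 / 320 = 75 latent frames, matching the
# [1, 128, 75] shapes asserted in the tests.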
| audiocraft-main | tests/modules/test_seanet.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import torch
from audiocraft.modules.lstm import StreamableLSTM
class TestStreamableLSTM:
def test_lstm(self):
B, C, T = 4, 2, random.randint(1, 100)
lstm = StreamableLSTM(C, 3, skip=False)
x = torch.randn(B, C, T)
y = lstm(x)
assert y.shape == torch.Size([B, C, T])
def test_lstm_skip(self):
B, C, T = 4, 2, random.randint(1, 100)
lstm = StreamableLSTM(C, 3, skip=True)
x = torch.randn(B, C, T)
y = lstm(x)
assert y.shape == torch.Size([B, C, T])
| audiocraft-main | tests/modules/test_lstm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import pytest
import torch
from audiocraft.modules.transformer import (
StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend)
def test_transformer_causal_streaming():
torch.manual_seed(1234)
for context, custom in product([None, 10], [False, True]):
# Test that causality and receptive fields are properly handled.
# looking at the gradients
tr = StreamingTransformer(
16, 4, 1 if context else 2,
causal=True, past_context=context, custom=custom,
dropout=0.)
steps = 20
for k in [0, 10, 15, 19]:
x = torch.randn(4, steps, 16, requires_grad=True)
y = tr(x)
y[:, k].abs().sum().backward()
if k + 1 < steps:
assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm()
assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm()
if context is not None and k > context:
limit = k - context - 1
assert torch.allclose(x.grad[:, :limit],
torch.tensor(0.)), x.grad[:, :limit].norm()
# Now check that streaming gives the same result at batch eval.
x = torch.randn(4, steps, 16)
y = tr(x)
ys = []
with tr.streaming():
for k in range(steps):
chunk = x[:, k:k + 1, :]
ys.append(tr(chunk))
y_stream = torch.cat(ys, dim=1)
delta = torch.norm(y_stream - y) / torch.norm(y)
assert delta < 1e-6, delta
def test_transformer_vs_pytorch():
torch.manual_seed(1234)
# Check that in the non causal setting, we get the same result as
# PyTorch Transformer encoder.
for custom in [False, True]:
tr = StreamingTransformer(
16, 4, 2,
causal=False, custom=custom, dropout=0., positional_scale=0.)
layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True)
tr_ref = torch.nn.TransformerEncoder(layer, 2)
tr.load_state_dict(tr_ref.state_dict())
x = torch.randn(4, 20, 16)
y = tr(x)
y2 = tr_ref(x)
delta = torch.norm(y2 - y) / torch.norm(y)
assert delta < 1e-6, delta
def test_streaming_api():
tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.)
tr.eval()
steps = 12
x = torch.randn(1, steps, 16)
with torch.no_grad():
with tr.streaming():
_ = tr(x[:, :1])
state = {k: v.clone() for k, v in tr.get_streaming_state().items()}
y = tr(x[:, 1:2])
tr.set_streaming_state(state)
y2 = tr(x[:, 1:2])
assert torch.allclose(y, y2), (y - y2).norm()
assert tr.flush() is None
def test_memory_efficient():
for backend in ['torch', 'xformers']:
torch.manual_seed(1234)
set_efficient_attention_backend(backend)
tr = StreamingTransformer(
16, 4, 2, custom=True, dropout=0., layer_scale=0.1)
tr_mem_efficient = StreamingTransformer(
16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1)
tr_mem_efficient.load_state_dict(tr.state_dict())
tr.eval()
steps = 12
x = torch.randn(3, steps, 16)
with torch.no_grad():
y = tr(x)
y2 = tr_mem_efficient(x)
assert torch.allclose(y, y2), ((y - y2).norm(), backend)
def test_attention_as_float32():
torch.manual_seed(1234)
cases = [
{'custom': True},
{'custom': False},
]
for case in cases:
tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case)
tr_float32 = StreamingTransformer(
16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case)
if not case['custom']:
# we are not using autocast here because it doesn't really
# work as expected on CPU, so we have to manually cast the weights of the MHA.
for layer in tr_float32.layers:
layer.self_attn.mha.to(torch.float32)
tr_float32.load_state_dict(tr.state_dict())
steps = 12
x = torch.randn(3, steps, 16, dtype=torch.bfloat16)
with torch.no_grad():
y = tr(x)
y2 = tr_float32(x)
assert not torch.allclose(y, y2), (y - y2).norm()
@torch.no_grad()
def test_streaming_memory_efficient():
for backend in ['torch', 'xformers']:
torch.manual_seed(1234)
set_efficient_attention_backend(backend)
tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True)
tr_mem_efficient = StreamingTransformer(
16, 4, 2, dropout=0., memory_efficient=True, causal=True)
tr.load_state_dict(tr_mem_efficient.state_dict())
tr.eval()
tr_mem_efficient.eval()
steps = 12
x = torch.randn(3, steps, 16)
ref = tr(x)
with tr_mem_efficient.streaming():
outs = []
# frame_sizes = [2] + [1] * (steps - 2)
frame_sizes = [1] * steps
for frame_size in frame_sizes:
frame = x[:, :frame_size]
x = x[:, frame_size:]
outs.append(tr_mem_efficient(frame))
out = torch.cat(outs, dim=1)
delta = torch.norm(out - ref) / torch.norm(out)
assert delta < 1e-6, delta
def test_cross_attention():
torch.manual_seed(1234)
for norm_first in [True, False]:
m = StreamingTransformer(
16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True)
m_cross = StreamingTransformer(
16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True)
m_cross.load_state_dict(m.state_dict(), strict=False)
x = torch.randn(2, 5, 16)
cross_x = torch.randn(2, 3, 16)
y_ref = m(x)
y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x)
# With norm_first, the two should be exactly the same,
# but with norm_first=False, we get 2 normalization in a row
# and the epsilon value leads to a tiny change.
atol = 0. if norm_first else 1e-6
print((y_ref - y_cross_zero).norm() / y_ref.norm())
assert torch.allclose(y_ref, y_cross_zero, atol=atol)
# We now expect a difference even with a generous atol of 1e-2.
y_cross = m_cross(x, cross_attention_src=cross_x)
assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2)
with pytest.raises(AssertionError):
_ = m_cross(x)
_ = m(x, cross_attention_src=cross_x)
def test_cross_attention_compat():
torch.manual_seed(1234)
num_heads = 2
dim = num_heads * 64
with pytest.raises(AssertionError):
StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True)
cross_attn = StreamingMultiheadAttention(
dim, num_heads, dropout=0, cross_attention=True, custom=True)
ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True)
# We can load the regular attention state dict
# so we have compat when loading old checkpoints.
cross_attn.load_state_dict(ref_attn.state_dict())
queries = torch.randn(3, 7, dim)
keys = torch.randn(3, 9, dim)
values = torch.randn(3, 9, dim)
y = cross_attn(queries, keys, values)[0]
y_ref = ref_attn(queries, keys, values)[0]
assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm()
# Now let's check that streaming is working properly.
with cross_attn.streaming():
ys = []
for step in range(queries.shape[1]):
ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0])
y_streaming = torch.cat(ys, dim=1)
assert torch.allclose(y_streaming, y, atol=1e-7)
def test_repeat_kv():
torch.manual_seed(1234)
num_heads = 8
kv_repeat = 4
dim = num_heads * 64
    with pytest.raises(AssertionError):
        StreamingMultiheadAttention(
            dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True)
    with pytest.raises(AssertionError):
        StreamingMultiheadAttention(
            dim, num_heads, causal=True, kv_repeat=kv_repeat)
    mha = StreamingMultiheadAttention(
        dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True)
x = torch.randn(4, 18, dim)
y = mha(x, x, x)[0]
assert x.shape == y.shape
def test_qk_layer_norm():
torch.manual_seed(1234)
tr = StreamingTransformer(
16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False)
steps = 12
x = torch.randn(3, steps, 16)
y = tr(x)
tr = StreamingTransformer(
16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True)
z = torch.randn(3, 21, 16)
y = tr(x, cross_attention_src=z)
assert y.shape == x.shape
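# Illustrative sketch (not part of the original tests): the streaming usage pattern
# exercised above, condensed. For a causal model, frame-by-frame decoding inside
# tr.streaming() is expected to match the full-sequence forward up to numerical noise.
def _streaming_usage_example():
    tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.)
    tr.eval()
    x = torch.randn(1, 8, 16)
    with torch.no_grad():
        full = tr(x)
        with tr.streaming():
            chunks = [tr(x[:, t: t + 1]) for t in range(x.shape[1])]
    out = torch.cat(chunks, dim=1)
    assert torch.norm(out - full) / torch.norm(full) < 1e-6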
| audiocraft-main | tests/modules/test_transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import math
import random
import pytest
import torch
from torch import nn
from audiocraft.modules import (
NormConv1d,
NormConvTranspose1d,
StreamableConv1d,
StreamableConvTranspose1d,
pad1d,
unpad1d,
)
def test_get_extra_padding_for_conv1d():
# TODO: Implement me!
pass
def test_pad1d_zeros():
x = torch.randn(1, 1, 20)
xp1 = pad1d(x, (0, 5), mode='constant', value=0.)
assert xp1.shape[-1] == 25
xp2 = pad1d(x, (5, 5), mode='constant', value=0.)
assert xp2.shape[-1] == 30
xp3 = pad1d(x, (0, 0), mode='constant', value=0.)
assert xp3.shape[-1] == 20
xp4 = pad1d(x, (10, 30), mode='constant', value=0.)
assert xp4.shape[-1] == 60
with pytest.raises(AssertionError):
pad1d(x, (-1, 0), mode='constant', value=0.)
with pytest.raises(AssertionError):
pad1d(x, (0, -1), mode='constant', value=0.)
with pytest.raises(AssertionError):
pad1d(x, (-1, -1), mode='constant', value=0.)
def test_pad1d_reflect():
x = torch.randn(1, 1, 20)
xp1 = pad1d(x, (0, 5), mode='reflect', value=0.)
assert xp1.shape[-1] == 25
xp2 = pad1d(x, (5, 5), mode='reflect', value=0.)
assert xp2.shape[-1] == 30
xp3 = pad1d(x, (0, 0), mode='reflect', value=0.)
assert xp3.shape[-1] == 20
xp4 = pad1d(x, (10, 30), mode='reflect', value=0.)
assert xp4.shape[-1] == 60
with pytest.raises(AssertionError):
pad1d(x, (-1, 0), mode='reflect', value=0.)
with pytest.raises(AssertionError):
pad1d(x, (0, -1), mode='reflect', value=0.)
with pytest.raises(AssertionError):
pad1d(x, (-1, -1), mode='reflect', value=0.)
def test_unpad1d():
x = torch.randn(1, 1, 20)
u1 = unpad1d(x, (5, 5))
assert u1.shape[-1] == 10
u2 = unpad1d(x, (0, 5))
assert u2.shape[-1] == 15
u3 = unpad1d(x, (5, 0))
assert u3.shape[-1] == 15
u4 = unpad1d(x, (0, 0))
assert u4.shape[-1] == x.shape[-1]
with pytest.raises(AssertionError):
unpad1d(x, (-1, 0))
with pytest.raises(AssertionError):
unpad1d(x, (0, -1))
with pytest.raises(AssertionError):
unpad1d(x, (-1, -1))
class TestNormConv1d:
def test_norm_conv1d_modules(self):
N, C, T = 2, 2, random.randrange(1, 100_000)
t0 = torch.randn(N, C, T)
C_out, kernel_size, stride = 1, 4, 1
expected_out_length = int((T - kernel_size) / stride + 1)
wn_conv = NormConv1d(C, 1, kernel_size=4, norm='weight_norm')
gn_conv = NormConv1d(C, 1, kernel_size=4, norm='time_group_norm')
nn_conv = NormConv1d(C, 1, kernel_size=4, norm='none')
assert isinstance(wn_conv.norm, nn.Identity)
assert isinstance(wn_conv.conv, nn.Conv1d)
assert isinstance(gn_conv.norm, nn.GroupNorm)
assert isinstance(gn_conv.conv, nn.Conv1d)
assert isinstance(nn_conv.norm, nn.Identity)
assert isinstance(nn_conv.conv, nn.Conv1d)
for conv_layer in [wn_conv, gn_conv, nn_conv]:
out = conv_layer(t0)
assert isinstance(out, torch.Tensor)
assert list(out.shape) == [N, C_out, expected_out_length]
class TestNormConvTranspose1d:
def test_normalizations(self):
N, C, T = 2, 2, random.randrange(1, 100_000)
t0 = torch.randn(N, C, T)
C_out, kernel_size, stride = 1, 4, 1
expected_out_length = (T - 1) * stride + (kernel_size - 1) + 1
wn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='weight_norm')
gn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='time_group_norm')
nn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='none')
assert isinstance(wn_convtr.norm, nn.Identity)
assert isinstance(wn_convtr.convtr, nn.ConvTranspose1d)
assert isinstance(gn_convtr.norm, nn.GroupNorm)
assert isinstance(gn_convtr.convtr, nn.ConvTranspose1d)
assert isinstance(nn_convtr.norm, nn.Identity)
assert isinstance(nn_convtr.convtr, nn.ConvTranspose1d)
for convtr_layer in [wn_convtr, gn_convtr, nn_convtr]:
out = convtr_layer(t0)
assert isinstance(out, torch.Tensor)
assert list(out.shape) == [N, C_out, expected_out_length]
class TestStreamableConv1d:
def get_streamable_conv1d_output_length(self, length, kernel_size, stride, dilation):
# StreamableConv1d internally pads to make sure that the last window is full
padding_total = (kernel_size - 1) * dilation - (stride - 1)
n_frames = (length - kernel_size + padding_total) / stride + 1
ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
return ideal_length // stride
def test_streamable_conv1d(self):
N, C, T = 2, 2, random.randrange(1, 100_000)
t0 = torch.randn(N, C, T)
C_out = 1
# conv params are [(kernel_size, stride, dilation)]
conv_params = [(4, 1, 1), (4, 2, 1), (3, 1, 3), (10, 5, 1), (3, 2, 3)]
for causal, (kernel_size, stride, dilation) in product([False, True], conv_params):
expected_out_length = self.get_streamable_conv1d_output_length(T, kernel_size, stride, dilation)
sconv = StreamableConv1d(C, C_out, kernel_size=kernel_size, stride=stride, dilation=dilation, causal=causal)
out = sconv(t0)
assert isinstance(out, torch.Tensor)
assert list(out.shape) == [N, C_out, expected_out_length]
class TestStreamableConvTranspose1d:
def get_streamable_convtr1d_output_length(self, length, kernel_size, stride):
padding_total = (kernel_size - stride)
return (length - 1) * stride - padding_total + (kernel_size - 1) + 1
def test_streamable_convtr1d(self):
N, C, T = 2, 2, random.randrange(1, 100_000)
t0 = torch.randn(N, C, T)
C_out = 1
        with pytest.raises(AssertionError):
            StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=False, trim_right_ratio=0.5)
        with pytest.raises(AssertionError):
            StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=-1.)
        with pytest.raises(AssertionError):
            StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=2)
# causal params are [(causal, trim_right)]
causal_params = [(False, 1.0), (True, 1.0), (True, 0.5), (True, 0.0)]
# conv params are [(kernel_size, stride)]
conv_params = [(4, 1), (4, 2), (3, 1), (10, 5)]
for ((causal, trim_right_ratio), (kernel_size, stride)) in product(causal_params, conv_params):
expected_out_length = self.get_streamable_convtr1d_output_length(T, kernel_size, stride)
sconvtr = StreamableConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride,
causal=causal, trim_right_ratio=trim_right_ratio)
out = sconvtr(t0)
assert isinstance(out, torch.Tensor)
assert list(out.shape) == [N, C_out, expected_out_length]
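# Illustrative worked example (not part of the original tests) of the length formula in
# get_streamable_conv1d_output_length above, for kernel_size=4, stride=2, dilation=1
# and an input of length 10:
#   padding_total = (4 - 1) * 1 - (2 - 1) = 2
#   n_frames = (10 - 4 + 2) / 2 + 1 = 5.0
#   ideal_length = (ceil(5.0) - 1) * 2 + (4 - 2) = 10
#   output length = 10 // 2 = 5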
| audiocraft-main | tests/modules/test_conv.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from itertools import product
import json
import math
import os
import random
import typing as tp
import pytest
import torch
from torch.utils.data import DataLoader
from audiocraft.data.audio_dataset import (
AudioDataset,
AudioMeta,
_get_audio_meta,
load_audio_meta,
save_audio_meta
)
from audiocraft.data.zip import PathInZip
from ..common_utils import TempDirMixin, get_white_noise, save_wav
class TestAudioMeta(TempDirMixin):
def test_get_audio_meta(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(duration * sample_rate)
wav = get_white_noise(ch, n_frames)
path = self.get_temp_path('sample.wav')
save_wav(path, wav, sample_rate)
m = _get_audio_meta(path, minimal=True)
assert m.path == path, 'path does not match'
assert m.sample_rate == sample_rate, 'sample rate does not match'
assert m.duration == duration, 'duration does not match'
assert m.amplitude is None
assert m.info_path is None
def test_save_audio_meta(self):
audio_meta = [
AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')),
AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json'))
]
empty_audio_meta = []
for idx, meta in enumerate([audio_meta, empty_audio_meta]):
path = self.get_temp_path(f'data_{idx}_save.jsonl')
save_audio_meta(path, meta)
with open(path, 'r') as f:
lines = f.readlines()
read_meta = [AudioMeta.from_dict(json.loads(line)) for line in lines]
assert len(read_meta) == len(meta)
for m, read_m in zip(meta, read_meta):
assert m == read_m
def test_load_audio_meta(self):
try:
import dora
except ImportError:
dora = None # type: ignore
audio_meta = [
AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')),
AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json'))
]
empty_meta = []
for idx, meta in enumerate([audio_meta, empty_meta]):
path = self.get_temp_path(f'data_{idx}_load.jsonl')
with open(path, 'w') as f:
for m in meta:
json_str = json.dumps(m.to_dict()) + '\n'
f.write(json_str)
read_meta = load_audio_meta(path)
assert len(read_meta) == len(meta)
for m, read_m in zip(meta, read_meta):
if dora:
m.path = dora.git_save.to_absolute_path(m.path)
assert m == read_m, f'original={m}, read={read_m}'
class TestAudioDataset(TempDirMixin):
def _create_audio_files(self,
root_name: str,
num_examples: int,
durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.),
sample_rate: int = 16_000,
channels: int = 1):
root_dir = self.get_temp_dir(root_name)
for i in range(num_examples):
if isinstance(durations, float):
duration = durations
elif isinstance(durations, tuple) and len(durations) == 1:
duration = durations[0]
elif isinstance(durations, tuple) and len(durations) == 2:
duration = random.uniform(durations[0], durations[1])
else:
assert False
n_frames = int(duration * sample_rate)
wav = get_white_noise(channels, n_frames)
path = os.path.join(root_dir, f'example_{i}.wav')
save_wav(path, wav, sample_rate)
return root_dir
def _create_audio_dataset(self,
root_name: str,
total_num_examples: int,
durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.),
sample_rate: int = 16_000,
channels: int = 1,
segment_duration: tp.Optional[float] = None,
num_examples: int = 10,
shuffle: bool = True,
return_info: bool = False):
root_dir = self._create_audio_files(root_name, total_num_examples, durations, sample_rate, channels)
dataset = AudioDataset.from_path(root_dir,
minimal_meta=True,
segment_duration=segment_duration,
num_samples=num_examples,
sample_rate=sample_rate,
channels=channels,
shuffle=shuffle,
return_info=return_info)
return dataset
def test_dataset_full(self):
total_examples = 10
min_duration, max_duration = 1., 4.
sample_rate = 16_000
channels = 1
dataset = self._create_audio_dataset(
'dset', total_examples, durations=(min_duration, max_duration),
sample_rate=sample_rate, channels=channels, segment_duration=None)
assert len(dataset) == total_examples
assert dataset.sample_rate == sample_rate
assert dataset.channels == channels
for idx in range(len(dataset)):
sample = dataset[idx]
assert sample.shape[0] == channels
assert sample.shape[1] <= int(max_duration * sample_rate)
assert sample.shape[1] >= int(min_duration * sample_rate)
def test_dataset_segment(self):
total_examples = 10
num_samples = 20
min_duration, max_duration = 1., 4.
segment_duration = 1.
sample_rate = 16_000
channels = 1
dataset = self._create_audio_dataset(
'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
channels=channels, segment_duration=segment_duration, num_examples=num_samples)
assert len(dataset) == num_samples
assert dataset.sample_rate == sample_rate
assert dataset.channels == channels
for idx in range(len(dataset)):
sample = dataset[idx]
assert sample.shape[0] == channels
assert sample.shape[1] == int(segment_duration * sample_rate)
def test_dataset_equal_audio_and_segment_durations(self):
total_examples = 1
num_samples = 2
audio_duration = 1.
segment_duration = 1.
sample_rate = 16_000
channels = 1
dataset = self._create_audio_dataset(
'dset', total_examples, durations=audio_duration, sample_rate=sample_rate,
channels=channels, segment_duration=segment_duration, num_examples=num_samples)
assert len(dataset) == num_samples
assert dataset.sample_rate == sample_rate
assert dataset.channels == channels
for idx in range(len(dataset)):
sample = dataset[idx]
assert sample.shape[0] == channels
assert sample.shape[1] == int(segment_duration * sample_rate)
# the random seek_time adds variability on audio read
sample_1 = dataset[0]
sample_2 = dataset[1]
assert not torch.allclose(sample_1, sample_2)
def test_dataset_samples(self):
total_examples = 1
num_samples = 2
audio_duration = 1.
segment_duration = 1.
sample_rate = 16_000
channels = 1
create_dataset = partial(
self._create_audio_dataset,
'dset', total_examples, durations=audio_duration, sample_rate=sample_rate,
channels=channels, segment_duration=segment_duration, num_examples=num_samples,
)
dataset = create_dataset(shuffle=True)
        # when shuffle = True, we get different inputs for the same index across epochs
sample_1 = dataset[0]
sample_2 = dataset[0]
assert not torch.allclose(sample_1, sample_2)
dataset_noshuffle = create_dataset(shuffle=False)
        # when shuffle = False, we get the same inputs for the same index across epochs
sample_1 = dataset_noshuffle[0]
sample_2 = dataset_noshuffle[0]
assert torch.allclose(sample_1, sample_2)
def test_dataset_return_info(self):
total_examples = 10
num_samples = 20
min_duration, max_duration = 1., 4.
segment_duration = 1.
sample_rate = 16_000
channels = 1
dataset = self._create_audio_dataset(
'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
assert len(dataset) == num_samples
assert dataset.sample_rate == sample_rate
assert dataset.channels == channels
for idx in range(len(dataset)):
sample, segment_info = dataset[idx]
assert sample.shape[0] == channels
assert sample.shape[1] == int(segment_duration * sample_rate)
assert segment_info.sample_rate == sample_rate
assert segment_info.total_frames == int(segment_duration * sample_rate)
assert segment_info.n_frames <= int(segment_duration * sample_rate)
assert segment_info.seek_time >= 0
def test_dataset_return_info_no_segment_duration(self):
total_examples = 10
num_samples = 20
min_duration, max_duration = 1., 4.
segment_duration = None
sample_rate = 16_000
channels = 1
dataset = self._create_audio_dataset(
'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
assert len(dataset) == total_examples
assert dataset.sample_rate == sample_rate
assert dataset.channels == channels
for idx in range(len(dataset)):
sample, segment_info = dataset[idx]
assert sample.shape[0] == channels
assert sample.shape[1] == segment_info.total_frames
assert segment_info.sample_rate == sample_rate
assert segment_info.n_frames <= segment_info.total_frames
def test_dataset_collate_fn(self):
total_examples = 10
num_samples = 20
min_duration, max_duration = 1., 4.
segment_duration = 1.
sample_rate = 16_000
channels = 1
dataset = self._create_audio_dataset(
'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=False)
batch_size = 4
dataloader = DataLoader(
dataset,
batch_size=batch_size,
num_workers=0
)
for idx, batch in enumerate(dataloader):
assert batch.shape[0] == batch_size
@pytest.mark.parametrize("segment_duration", [1.0, None])
def test_dataset_with_meta_collate_fn(self, segment_duration):
total_examples = 10
num_samples = 20
min_duration, max_duration = 1., 4.
sample_rate = 16_000
channels = 1
dataset = self._create_audio_dataset(
'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate,
channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True)
batch_size = 4
dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=dataset.collater,
num_workers=0
)
        for idx, batch in enumerate(dataloader):
            wav, infos = batch
            expected_size = min(batch_size, len(dataset) - idx * batch_size)
            assert wav.shape[0] == expected_size
            assert len(infos) == expected_size
@pytest.mark.parametrize("segment_duration,sample_on_weight,sample_on_duration,a_hist,b_hist,c_hist", [
[1, True, True, 0.5, 0.5, 0.0],
[1, False, True, 0.25, 0.5, 0.25],
[1, True, False, 0.666, 0.333, 0.0],
[1, False, False, 0.333, 0.333, 0.333],
[None, False, False, 0.333, 0.333, 0.333]])
def test_sample_with_weight(self, segment_duration, sample_on_weight, sample_on_duration, a_hist, b_hist, c_hist):
random.seed(1234)
rng = torch.Generator()
rng.manual_seed(1234)
def _get_histogram(dataset, repetitions=20_000):
counts = {file_meta.path: 0. for file_meta in meta}
for _ in range(repetitions):
file_meta = dataset.sample_file(0, rng)
counts[file_meta.path] += 1
return {name: count / repetitions for name, count in counts.items()}
meta = [
AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
]
dataset = AudioDataset(
meta, segment_duration=segment_duration, sample_on_weight=sample_on_weight,
sample_on_duration=sample_on_duration)
hist = _get_histogram(dataset)
assert math.isclose(hist['a'], a_hist, abs_tol=0.01)
assert math.isclose(hist['b'], b_hist, abs_tol=0.01)
assert math.isclose(hist['c'], c_hist, abs_tol=0.01)
def test_meta_duration_filter_all(self):
meta = [
AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
]
        with pytest.raises(AssertionError):
            AudioDataset(meta, segment_duration=11, min_segment_ratio=1)
def test_meta_duration_filter_long(self):
meta = [
AudioMeta(path='a', duration=5, sample_rate=1, weight=2),
AudioMeta(path='b', duration=10, sample_rate=1, weight=None),
AudioMeta(path='c', duration=5, sample_rate=1, weight=0),
]
dataset = AudioDataset(meta, segment_duration=None, min_segment_ratio=1, max_audio_duration=7)
assert len(dataset) == 2
| audiocraft-main | tests/data/test_audio_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from itertools import product
import random
import numpy as np
import torch
import torchaudio
from audiocraft.data.audio import audio_info, audio_read, audio_write, _av_read
from ..common_utils import TempDirMixin, get_white_noise, save_wav
class TestInfo(TempDirMixin):
def test_info_mp3(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
for sample_rate, ch in product(sample_rates, channels):
wav = get_white_noise(ch, int(sample_rate * duration))
path = self.get_temp_path('sample_wav.mp3')
save_wav(path, wav, sample_rate)
info = audio_info(path)
assert info.sample_rate == sample_rate
assert info.channels == ch
# we cannot trust torchaudio for num_frames, so we don't check
def _test_info_format(self, ext: str):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
wav = get_white_noise(ch, n_frames)
path = self.get_temp_path(f'sample_wav{ext}')
save_wav(path, wav, sample_rate)
info = audio_info(path)
assert info.sample_rate == sample_rate
assert info.channels == ch
assert np.isclose(info.duration, duration, atol=1e-5)
def test_info_wav(self):
self._test_info_format('.wav')
def test_info_flac(self):
self._test_info_format('.flac')
def test_info_ogg(self):
self._test_info_format('.ogg')
def test_info_m4a(self):
# TODO: generate m4a file programmatically
# self._test_info_format('.m4a')
pass
class TestRead(TempDirMixin):
def test_read_full_wav(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99)
path = self.get_temp_path('sample_wav.wav')
save_wav(path, wav, sample_rate)
read_wav, read_sr = audio_read(path)
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[1] == wav.shape[1]
assert torch.allclose(read_wav, wav, rtol=1e-03, atol=1e-04)
def test_read_partial_wav(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
read_duration = torch.rand(1).item()
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
read_frames = int(sample_rate * read_duration)
wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99)
path = self.get_temp_path('sample_wav.wav')
save_wav(path, wav, sample_rate)
read_wav, read_sr = audio_read(path, 0, read_duration)
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[1] == read_frames
assert torch.allclose(read_wav[..., 0:read_frames], wav[..., 0:read_frames], rtol=1e-03, atol=1e-04)
def test_read_seek_time_wav(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
read_duration = 1.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99)
path = self.get_temp_path('sample_wav.wav')
save_wav(path, wav, sample_rate)
seek_time = torch.rand(1).item()
read_wav, read_sr = audio_read(path, seek_time, read_duration)
seek_frames = int(sample_rate * seek_time)
expected_frames = n_frames - seek_frames
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[1] == expected_frames
assert torch.allclose(read_wav, wav[..., seek_frames:], rtol=1e-03, atol=1e-04)
def test_read_seek_time_wav_padded(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
read_duration = 1.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
read_frames = int(sample_rate * read_duration)
wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99)
path = self.get_temp_path('sample_wav.wav')
save_wav(path, wav, sample_rate)
seek_time = torch.rand(1).item()
seek_frames = int(sample_rate * seek_time)
expected_frames = n_frames - seek_frames
read_wav, read_sr = audio_read(path, seek_time, read_duration, pad=True)
expected_pad_wav = torch.zeros(wav.shape[0], read_frames - expected_frames)
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[1] == read_frames
assert torch.allclose(read_wav[..., :expected_frames], wav[..., seek_frames:], rtol=1e-03, atol=1e-04)
assert torch.allclose(read_wav[..., expected_frames:], expected_pad_wav)
class TestAvRead(TempDirMixin):
def test_avread_seek_base(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 2.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
wav = get_white_noise(ch, n_frames)
path = self.get_temp_path(f'reference_a_{sample_rate}_{ch}.wav')
save_wav(path, wav, sample_rate)
for _ in range(100):
# seek will always load a full duration segment in the file
seek_time = random.uniform(0.0, 1.0)
seek_duration = random.uniform(0.001, 1.0)
read_wav, read_sr = _av_read(path, seek_time, seek_duration)
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[-1] == int(seek_duration * sample_rate)
def test_avread_seek_partial(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
wav = get_white_noise(ch, n_frames)
path = self.get_temp_path(f'reference_b_{sample_rate}_{ch}.wav')
save_wav(path, wav, sample_rate)
for _ in range(100):
# seek will always load a partial segment
seek_time = random.uniform(0.5, 1.)
seek_duration = 1.
expected_num_frames = n_frames - int(seek_time * sample_rate)
read_wav, read_sr = _av_read(path, seek_time, seek_duration)
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[-1] == expected_num_frames
def test_avread_seek_outofbound(self):
sample_rates = [8000, 16_000]
channels = [1, 2]
duration = 1.
for sample_rate, ch in product(sample_rates, channels):
n_frames = int(sample_rate * duration)
wav = get_white_noise(ch, n_frames)
path = self.get_temp_path(f'reference_c_{sample_rate}_{ch}.wav')
save_wav(path, wav, sample_rate)
seek_time = 1.5
read_wav, read_sr = _av_read(path, seek_time, 1.)
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[-1] == 0
def test_avread_seek_edge(self):
sample_rates = [8000, 16_000]
# some of these values will have
# int(((frames - 1) / sample_rate) * sample_rate) != (frames - 1)
n_frames = [1000, 1001, 1002]
channels = [1, 2]
for sample_rate, ch, frames in product(sample_rates, channels, n_frames):
duration = frames / sample_rate
wav = get_white_noise(ch, frames)
path = self.get_temp_path(f'reference_d_{sample_rate}_{ch}.wav')
save_wav(path, wav, sample_rate)
seek_time = (frames - 1) / sample_rate
seek_frames = int(seek_time * sample_rate)
read_wav, read_sr = _av_read(path, seek_time, duration)
assert read_sr == sample_rate
assert read_wav.shape[0] == wav.shape[0]
assert read_wav.shape[-1] == (frames - seek_frames)
class TestAudioWrite(TempDirMixin):
def test_audio_write_wav(self):
torch.manual_seed(1234)
sample_rates = [8000, 16_000]
n_frames = [1000, 1001, 1002]
channels = [1, 2]
strategies = ["peak", "clip", "rms"]
formats = ["wav", "mp3"]
for sample_rate, ch, frames in product(sample_rates, channels, n_frames):
for format_, strategy in product(formats, strategies):
wav = get_white_noise(ch, frames)
path = self.get_temp_path(f'pred_{sample_rate}_{ch}')
audio_write(path, wav, sample_rate, format_, strategy=strategy)
read_wav, read_sr = torchaudio.load(f'{path}.{format_}')
if format_ == "wav":
assert read_wav.shape == wav.shape
if format_ == "wav" and strategy in ["peak", "rms"]:
rescaled_read_wav = read_wav / read_wav.abs().max() * wav.abs().max()
# for a Gaussian, the typical max scale will be less than ~5x the std.
                    # The error when writing to disk will be ~ 1/2**15, and when rescaling, 5x that.
# For RMS target, rescaling leaves more headroom by default, leading
# to a 20x rescaling typically
atol = (5 if strategy == "peak" else 20) / 2**15
delta = (rescaled_read_wav - wav).abs().max()
assert torch.allclose(wav, rescaled_read_wav, rtol=0, atol=atol), (delta, atol)
formats = ["wav"] # faster unit tests
| audiocraft-main | tests/data/test_audio.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | tests/data/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import julius
import torch
import pytest
from audiocraft.data.audio_utils import (
_clip_wav,
convert_audio_channels,
convert_audio,
normalize_audio
)
from ..common_utils import get_batch_white_noise
class TestConvertAudioChannels:
def test_convert_audio_channels_downmix(self):
b, c, t = 2, 3, 100
audio = get_batch_white_noise(b, c, t)
mixed = convert_audio_channels(audio, channels=2)
assert list(mixed.shape) == [b, 2, t]
def test_convert_audio_channels_nochange(self):
b, c, t = 2, 3, 100
audio = get_batch_white_noise(b, c, t)
mixed = convert_audio_channels(audio, channels=c)
assert list(mixed.shape) == list(audio.shape)
def test_convert_audio_channels_upmix(self):
b, c, t = 2, 1, 100
audio = get_batch_white_noise(b, c, t)
mixed = convert_audio_channels(audio, channels=3)
assert list(mixed.shape) == [b, 3, t]
def test_convert_audio_channels_upmix_error(self):
b, c, t = 2, 2, 100
audio = get_batch_white_noise(b, c, t)
with pytest.raises(ValueError):
convert_audio_channels(audio, channels=3)
class TestConvertAudio:
def test_convert_audio_channels_downmix(self):
b, c, dur = 2, 3, 4.
sr = 128
audio = get_batch_white_noise(b, c, int(sr * dur))
out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2)
assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]]
def test_convert_audio_channels_upmix(self):
b, c, dur = 2, 1, 4.
sr = 128
audio = get_batch_white_noise(b, c, int(sr * dur))
out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3)
assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]]
def test_convert_audio_upsample(self):
b, c, dur = 2, 1, 4.
sr = 2
new_sr = 3
audio = get_batch_white_noise(b, c, int(sr * dur))
out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
assert torch.allclose(out, out_j)
def test_convert_audio_resample(self):
b, c, dur = 2, 1, 4.
sr = 3
new_sr = 2
audio = get_batch_white_noise(b, c, int(sr * dur))
out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
assert torch.allclose(out, out_j)
class TestNormalizeAudio:
def test_clip_wav(self):
b, c, dur = 2, 1, 4.
sr = 3
audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
_clip_wav(audio)
assert audio.abs().max() <= 1
def test_normalize_audio_clip(self):
b, c, dur = 2, 1, 4.
sr = 3
audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
norm_audio = normalize_audio(audio, strategy='clip')
assert norm_audio.abs().max() <= 1
def test_normalize_audio_rms(self):
b, c, dur = 2, 1, 4.
sr = 3
audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
norm_audio = normalize_audio(audio, strategy='rms')
assert norm_audio.abs().max() <= 1
def test_normalize_audio_peak(self):
b, c, dur = 2, 1, 4.
sr = 3
audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
norm_audio = normalize_audio(audio, strategy='peak')
assert norm_audio.abs().max() <= 1
| audiocraft-main | tests/data/test_audio_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | scripts/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
To run this script, from the root of the repo. Make sure to have Flask installed
FLASK_DEBUG=1 FLASK_APP=scripts.mos flask run -p 4567
# or if you have gunicorn
gunicorn -w 4 -b 127.0.0.1:8895 -t 120 'scripts.mos:app' --access-logfile -
"""
from collections import defaultdict
from functools import wraps
from hashlib import sha1
import json
import math
from pathlib import Path
import random
import typing as tp
from flask import Flask, redirect, render_template, request, session, url_for
from audiocraft import train
from audiocraft.utils.samples.manager import get_samples_for_xps
SAMPLES_PER_PAGE = 8
MAX_RATING = 5
storage = Path(train.main.dora.dir / 'mos_storage')
storage.mkdir(exist_ok=True)
surveys = storage / 'surveys'
surveys.mkdir(exist_ok=True)
magma_root = Path(train.__file__).parent.parent
app = Flask('mos', static_folder=str(magma_root / 'scripts/static'),
template_folder=str(magma_root / 'scripts/templates'))
app.secret_key = b'audiocraft makes the best songs'
def normalize_path(path: Path):
"""Just to make path a bit nicer, make them relative to the Dora root dir.
"""
path = path.resolve()
dora_dir = train.main.dora.dir.resolve() / 'xps'
return path.relative_to(dora_dir)
def get_full_path(normalized_path: Path):
"""Revert `normalize_path`.
"""
return train.main.dora.dir.resolve() / 'xps' / normalized_path
def get_signature(xps: tp.List[str]):
"""Return a signature for a list of XP signatures.
"""
return sha1(json.dumps(xps).encode()).hexdigest()[:10]
def ensure_logged(func):
"""Ensure user is logged in.
"""
@wraps(func)
def _wrapped(*args, **kwargs):
user = session.get('user')
if user is None:
return redirect(url_for('login', redirect_to=request.url))
return func(*args, **kwargs)
return _wrapped
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Login user if not already, then redirect.
"""
user = session.get('user')
if user is None:
error = None
if request.method == 'POST':
user = request.form['user']
if not user:
error = 'User cannot be empty'
if user is None or error:
return render_template('login.html', error=error)
assert user
session['user'] = user
redirect_to = request.args.get('redirect_to')
if redirect_to is None:
redirect_to = url_for('index')
return redirect(redirect_to)
@app.route('/', methods=['GET', 'POST'])
@ensure_logged
def index():
"""Offer to create a new study.
"""
errors = []
if request.method == 'POST':
xps_or_grids = [part.strip() for part in request.form['xps'].split()]
xps = set()
for xp_or_grid in xps_or_grids:
xp_path = train.main.dora.dir / 'xps' / xp_or_grid
if xp_path.exists():
xps.add(xp_or_grid)
continue
grid_path = train.main.dora.dir / 'grids' / xp_or_grid
if grid_path.exists():
for child in grid_path.iterdir():
if child.is_symlink():
xps.add(child.name)
continue
errors.append(f'{xp_or_grid} is neither an XP nor a grid!')
assert xps or errors
blind = 'true' if request.form.get('blind') == 'on' else 'false'
xps = list(xps)
if not errors:
signature = get_signature(xps)
manifest = {
'xps': xps,
}
survey_path = surveys / signature
survey_path.mkdir(exist_ok=True)
with open(survey_path / 'manifest.json', 'w') as f:
json.dump(manifest, f, indent=2)
return redirect(url_for('survey', blind=blind, signature=signature))
return render_template('index.html', errors=errors)
@app.route('/survey/<signature>', methods=['GET', 'POST'])
@ensure_logged
def survey(signature):
success = request.args.get('success', False)
seed = int(request.args.get('seed', 4321))
blind = request.args.get('blind', 'false') in ['true', 'on', 'True']
exclude_prompted = request.args.get('exclude_prompted', 'false') in ['true', 'on', 'True']
exclude_unprompted = request.args.get('exclude_unprompted', 'false') in ['true', 'on', 'True']
max_epoch = int(request.args.get('max_epoch', '-1'))
survey_path = surveys / signature
assert survey_path.exists(), survey_path
user = session['user']
result_folder = survey_path / 'results'
result_folder.mkdir(exist_ok=True)
result_file = result_folder / f'{user}_{seed}.json'
with open(survey_path / 'manifest.json') as f:
manifest = json.load(f)
xps = [train.main.get_xp_from_sig(xp) for xp in manifest['xps']]
names, ref_name = train.main.get_names(xps)
samples_kwargs = {
'exclude_prompted': exclude_prompted,
'exclude_unprompted': exclude_unprompted,
'max_epoch': max_epoch,
}
matched_samples = get_samples_for_xps(xps, epoch=-1, **samples_kwargs) # fetch latest epoch
models_by_id = {
id: [{
'xp': xps[idx],
'xp_name': names[idx],
'model_id': f'{xps[idx].sig}-{sample.id}',
'sample': sample,
'is_prompted': sample.prompt is not None,
'errors': [],
} for idx, sample in enumerate(samples)]
for id, samples in matched_samples.items()
}
experiments = [
{'xp': xp, 'name': names[idx], 'epoch': list(matched_samples.values())[0][idx].epoch}
for idx, xp in enumerate(xps)
]
keys = list(matched_samples.keys())
keys.sort()
rng = random.Random(seed)
rng.shuffle(keys)
model_ids = keys[:SAMPLES_PER_PAGE]
if blind:
for key in model_ids:
rng.shuffle(models_by_id[key])
ok = True
if request.method == 'POST':
all_samples_results = []
for id in model_ids:
models = models_by_id[id]
result = {
'id': id,
'is_prompted': models[0]['is_prompted'],
'models': {}
}
all_samples_results.append(result)
for model in models:
rating = request.form[model['model_id']]
if rating:
rating = int(rating)
assert rating <= MAX_RATING and rating >= 1
result['models'][model['xp'].sig] = rating
model['rating'] = rating
else:
ok = False
model['errors'].append('Please rate this model.')
if ok:
result = {
'results': all_samples_results,
'seed': seed,
'user': user,
'blind': blind,
'exclude_prompted': exclude_prompted,
'exclude_unprompted': exclude_unprompted,
}
print(result)
with open(result_file, 'w') as f:
json.dump(result, f)
seed = seed + 1
return redirect(url_for(
'survey', signature=signature, blind=blind, seed=seed,
exclude_prompted=exclude_prompted, exclude_unprompted=exclude_unprompted,
max_epoch=max_epoch, success=True))
ratings = list(range(1, MAX_RATING + 1))
return render_template(
'survey.html', ratings=ratings, blind=blind, seed=seed, signature=signature, success=success,
exclude_prompted=exclude_prompted, exclude_unprompted=exclude_unprompted, max_epoch=max_epoch,
experiments=experiments, models_by_id=models_by_id, model_ids=model_ids, errors=[],
ref_name=ref_name, already_filled=result_file.exists())
@app.route('/audio/<path:path>')
def audio(path: str):
full_path = Path('/') / path
assert full_path.suffix in [".mp3", ".wav"]
    content_type = 'audio/mpeg' if full_path.suffix == '.mp3' else 'audio/wav'
    return full_path.read_bytes(), {'Content-Type': content_type}
def mean(x):
return sum(x) / len(x)
def std(x):
m = mean(x)
return math.sqrt(sum((i - m)**2 for i in x) / len(x))
@app.route('/results/<signature>')
@ensure_logged
def results(signature):
survey_path = surveys / signature
assert survey_path.exists(), survey_path
result_folder = survey_path / 'results'
result_folder.mkdir(exist_ok=True)
# ratings per model, then per user.
ratings_per_model = defaultdict(list)
users = []
for result_file in result_folder.iterdir():
if result_file.suffix != '.json':
continue
with open(result_file) as f:
results = json.load(f)
users.append(results['user'])
for result in results['results']:
for sig, rating in result['models'].items():
ratings_per_model[sig].append(rating)
fmt = '{:.2f}'
models = []
for model in sorted(ratings_per_model.keys()):
ratings = ratings_per_model[model]
models.append({
'sig': model,
'samples': len(ratings),
'mean_rating': fmt.format(mean(ratings)),
            # 1.96 is the z-score for a 95% confidence interval under a
            # normal approximation of the rating distribution.
'std_rating': fmt.format(1.96 * std(ratings) / len(ratings)**0.5),
})
return render_template('results.html', signature=signature, models=models, users=users)
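

# --- Worked example of the aggregation above (a sketch, numbers chosen for illustration) ---
# With ratings [3, 4, 4, 5] collected for one model:
#   mean(ratings) = 4.0
#   std(ratings)  = sqrt(((-1)**2 + 0 + 0 + 1**2) / 4) ~ 0.707
#   95% CI half-width = 1.96 * 0.707 / sqrt(4) ~ 0.69
# so the results page would report mean_rating=4.00 and std_rating=0.69.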
| audiocraft-main | scripts/mos.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Resampling script.
"""
import argparse
from pathlib import Path
import shutil
import typing as tp
import submitit
import tqdm
from audiocraft.data.audio import audio_read, audio_write
from audiocraft.data.audio_dataset import load_audio_meta, find_audio_files
from audiocraft.data.audio_utils import convert_audio
from audiocraft.environment import AudioCraftEnvironment
def read_txt_files(path: tp.Union[str, Path]):
    with open(path) as f:
        lines = [line.rstrip() for line in f]
    print(f"Read {len(lines)} lines from .txt")
    lines = [line for line in lines if Path(line).suffix not in ['.json', '.txt', '.csv']]
    print(f"Kept {len(lines)} audio paths after filtering out non-audio suffixes")
return lines
def read_egs_files(path: tp.Union[str, Path]):
path = Path(path)
if path.is_dir():
if (path / 'data.jsonl').exists():
path = path / 'data.jsonl'
elif (path / 'data.jsonl.gz').exists():
path = path / 'data.jsonl.gz'
else:
raise ValueError("Don't know where to read metadata from in the dir. "
"Expecting either a data.jsonl or data.jsonl.gz file but none found.")
meta = load_audio_meta(path)
return [m.path for m in meta]
def process_dataset(args, n_shards: int, node_index: int, task_index: tp.Optional[int] = None):
if task_index is None:
env = submitit.JobEnvironment()
task_index = env.global_rank
shard_index = node_index * args.tasks_per_node + task_index
if args.files_path is None:
lines = [m.path for m in find_audio_files(args.root_path, resolve=False, progress=True, workers=8)]
else:
files_path = Path(args.files_path)
if files_path.suffix == '.txt':
print(f"Reading file list from .txt file: {args.files_path}")
lines = read_txt_files(args.files_path)
else:
print(f"Reading file list from egs: {args.files_path}")
lines = read_egs_files(args.files_path)
total_files = len(lines)
print(
f"Total of {total_files} processed with {n_shards} shards. " +
f"Current idx = {shard_index} -> {total_files // n_shards} files to process"
)
for idx, line in tqdm.tqdm(enumerate(lines)):
# skip if not part of this shard
if idx % n_shards != shard_index:
continue
path = str(AudioCraftEnvironment.apply_dataset_mappers(line))
root_path = str(args.root_path)
if not root_path.endswith('/'):
root_path += '/'
assert path.startswith(str(root_path)), \
f"Mismatch between path and provided root: {path} VS {root_path}"
try:
metadata_path = Path(path).with_suffix('.json')
out_path = args.out_path / path[len(root_path):]
out_metadata_path = out_path.with_suffix('.json')
out_done_token = out_path.with_suffix('.done')
# don't reprocess existing files
if out_done_token.exists():
continue
print(idx, out_path, path)
mix, sr = audio_read(path)
mix_channels = args.channels if args.channels is not None and args.channels > 0 else mix.size(0)
# enforce simple stereo
out_channels = mix_channels
if out_channels > 2:
print(f"Mix has more than two channels: {out_channels}, enforcing 2 channels")
out_channels = 2
out_sr = args.sample_rate if args.sample_rate is not None else sr
out_wav = convert_audio(mix, sr, out_sr, out_channels)
audio_write(out_path.with_suffix(''), out_wav, sample_rate=out_sr,
format=args.format, normalize=False, strategy='clip')
if metadata_path.exists():
shutil.copy(metadata_path, out_metadata_path)
else:
print(f"No metadata found at {str(metadata_path)}")
out_done_token.touch()
except Exception as e:
print(f"Error processing file line: {line}, {e}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Resample dataset with SLURM.")
parser.add_argument(
"--log_root",
type=Path,
default=Path.home() / 'tmp' / 'resample_logs',
)
parser.add_argument(
"--files_path",
type=Path,
help="List of files to process, either .txt (one file per line) or a jsonl[.gz].",
)
parser.add_argument(
"--root_path",
type=Path,
required=True,
help="When rewriting paths, this will be the prefix to remove.",
)
parser.add_argument(
"--out_path",
type=Path,
required=True,
help="When rewriting paths, `root_path` will be replaced by this.",
)
parser.add_argument("--xp_name", type=str, default="shutterstock")
parser.add_argument(
"--nodes",
type=int,
default=4,
)
parser.add_argument(
"--tasks_per_node",
type=int,
default=20,
)
parser.add_argument(
"--cpus_per_task",
type=int,
default=4,
)
parser.add_argument(
"--memory_gb",
type=int,
help="Memory in GB."
)
parser.add_argument(
"--format",
type=str,
default="wav",
)
parser.add_argument(
"--sample_rate",
type=int,
default=32000,
)
parser.add_argument(
"--channels",
type=int,
)
parser.add_argument(
"--partition",
default='learnfair',
)
parser.add_argument("--qos")
parser.add_argument("--account")
parser.add_argument("--timeout", type=int, default=4320)
parser.add_argument('--debug', action='store_true', help='debug mode (local run)')
args = parser.parse_args()
n_shards = args.tasks_per_node * args.nodes
if args.files_path is None:
print("Warning: --files_path not provided, not recommended when processing more than 10k files.")
if args.debug:
print("Debugging mode")
process_dataset(args, n_shards=n_shards, node_index=0, task_index=0)
else:
log_folder = Path(args.log_root) / args.xp_name / '%j'
print(f"Logging to: {log_folder}")
log_folder.parent.mkdir(parents=True, exist_ok=True)
executor = submitit.AutoExecutor(folder=str(log_folder))
if args.qos:
executor.update_parameters(slurm_partition=args.partition, slurm_qos=args.qos, slurm_account=args.account)
else:
executor.update_parameters(slurm_partition=args.partition)
executor.update_parameters(
slurm_job_name=args.xp_name, timeout_min=args.timeout,
cpus_per_task=args.cpus_per_task, tasks_per_node=args.tasks_per_node, nodes=1)
if args.memory_gb:
executor.update_parameters(mem=f'{args.memory_gb}GB')
jobs = []
with executor.batch():
for node_index in range(args.nodes):
job = executor.submit(process_dataset, args, n_shards=n_shards, node_index=node_index)
jobs.append(job)
for job in jobs:
print(f"Waiting on job {job.job_id}")
job.results()
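

# --- Illustrative invocations (a sketch; all paths below are placeholders) ---
# Local debug run, processing a single shard in the current process:
#   python scripts/resample_dataset.py --debug \
#       --files_path egs/my_dataset/data.jsonl.gz \
#       --root_path /datasets/original --out_path /datasets/resampled \
#       --sample_rate 32000 --channels 2 --format wav
# SLURM run across 4 nodes x 20 tasks per node (80 shards), using the
# defaults defined above:
#   python scripts/resample_dataset.py \
#       --files_path egs/my_dataset/data.jsonl.gz \
#       --root_path /datasets/original --out_path /datasets/resampled \
#       --partition learnfair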
| audiocraft-main | scripts/resample_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py
# also released under the MIT license.
import argparse
from concurrent.futures import ProcessPoolExecutor
import os
from pathlib import Path
import subprocess as sp
from tempfile import NamedTemporaryFile
import time
import typing as tp
import warnings
import torch
import gradio as gr
from audiocraft.data.audio_utils import convert_audio
from audiocraft.data.audio import audio_write
from audiocraft.models import MusicGen, MultiBandDiffusion
MODEL = None # Last used model
IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
print(IS_BATCHED)
MAX_BATCH_SIZE = 12
BATCHED_DURATION = 15
INTERRUPTING = False
MBD = None
# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
_old_call = sp.call
def _call_nostderr(*args, **kwargs):
# Avoid ffmpeg vomiting on the logs.
kwargs['stderr'] = sp.DEVNULL
kwargs['stdout'] = sp.DEVNULL
    # Preserve the return code of the wrapped call.
    return _old_call(*args, **kwargs)
sp.call = _call_nostderr
# Preallocating the pool of processes.
pool = ProcessPoolExecutor(4)
pool.__enter__()
def interrupt():
global INTERRUPTING
INTERRUPTING = True
class FileCleaner:
def __init__(self, file_lifetime: float = 3600):
self.file_lifetime = file_lifetime
self.files = []
def add(self, path: tp.Union[str, Path]):
self._cleanup()
self.files.append((time.time(), Path(path)))
def _cleanup(self):
now = time.time()
for time_added, path in list(self.files):
if now - time_added > self.file_lifetime:
if path.exists():
path.unlink()
self.files.pop(0)
else:
break
file_cleaner = FileCleaner()
def make_waveform(*args, **kwargs):
# Further remove some warnings.
be = time.time()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
out = gr.make_waveform(*args, **kwargs)
print("Make a video took", time.time() - be)
return out
def load_model(version='facebook/musicgen-melody'):
global MODEL
print("Loading model", version)
if MODEL is None or MODEL.name != version:
MODEL = MusicGen.get_pretrained(version)
def load_diffusion():
global MBD
if MBD is None:
print("loading MBD")
MBD = MultiBandDiffusion.get_mbd_musicgen()
def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
MODEL.set_generation_params(duration=duration, **gen_kwargs)
print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies])
be = time.time()
processed_melodies = []
target_sr = 32000
target_ac = 1
for melody in melodies:
if melody is None:
processed_melodies.append(None)
else:
sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
if melody.dim() == 1:
melody = melody[None]
melody = melody[..., :int(sr * duration)]
melody = convert_audio(melody, sr, target_sr, target_ac)
processed_melodies.append(melody)
if any(m is not None for m in processed_melodies):
outputs = MODEL.generate_with_chroma(
descriptions=texts,
melody_wavs=processed_melodies,
melody_sample_rate=target_sr,
progress=progress,
return_tokens=USE_DIFFUSION
)
else:
outputs = MODEL.generate(texts, progress=progress, return_tokens=USE_DIFFUSION)
if USE_DIFFUSION:
outputs_diffusion = MBD.tokens_to_wav(outputs[1])
outputs = torch.cat([outputs[0], outputs_diffusion], dim=0)
outputs = outputs.detach().cpu().float()
pending_videos = []
out_wavs = []
for output in outputs:
with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
audio_write(
file.name, output, MODEL.sample_rate, strategy="loudness",
loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
pending_videos.append(pool.submit(make_waveform, file.name))
out_wavs.append(file.name)
file_cleaner.add(file.name)
out_videos = [pending_video.result() for pending_video in pending_videos]
for video in out_videos:
file_cleaner.add(video)
print("batch finished", len(texts), time.time() - be)
print("Tempfiles currently stored: ", len(file_cleaner.files))
return out_videos, out_wavs
def predict_batched(texts, melodies):
max_text_length = 512
texts = [text[:max_text_length] for text in texts]
load_model('facebook/musicgen-melody')
res = _do_predictions(texts, melodies, BATCHED_DURATION)
return res
def predict_full(model, decoder, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
global INTERRUPTING
global USE_DIFFUSION
INTERRUPTING = False
if temperature < 0:
raise gr.Error("Temperature must be >= 0.")
if topk < 0:
raise gr.Error("Topk must be non-negative.")
if topp < 0:
raise gr.Error("Topp must be non-negative.")
topk = int(topk)
if decoder == "MultiBand_Diffusion":
USE_DIFFUSION = True
load_diffusion()
else:
USE_DIFFUSION = False
load_model(model)
def _progress(generated, to_generate):
progress((min(generated, to_generate), to_generate))
if INTERRUPTING:
raise gr.Error("Interrupted.")
MODEL.set_custom_progress_callback(_progress)
videos, wavs = _do_predictions(
[text], [melody], duration, progress=True,
top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
if USE_DIFFUSION:
return videos[0], wavs[0], videos[1], wavs[1]
return videos[0], wavs[0], None, None
def toggle_audio_src(choice):
if choice == "mic":
return gr.update(source="microphone", value=None, label="Microphone")
else:
return gr.update(source="upload", value=None, label="File")
def toggle_diffusion(choice):
if choice == "MultiBand_Diffusion":
return [gr.update(visible=True)] * 2
else:
return [gr.update(visible=False)] * 2
def ui_full(launch_kwargs):
with gr.Blocks() as interface:
gr.Markdown(
"""
# MusicGen
This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
a simple and controllable model for music generation
presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
"""
)
with gr.Row():
with gr.Column():
with gr.Row():
text = gr.Text(label="Input Text", interactive=True)
with gr.Column():
radio = gr.Radio(["file", "mic"], value="file",
label="Condition on a melody (optional) File or Mic")
melody = gr.Audio(source="upload", type="numpy", label="File",
interactive=True, elem_id="melody-input")
with gr.Row():
submit = gr.Button("Submit")
# Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
_ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
with gr.Row():
model = gr.Radio(["facebook/musicgen-melody", "facebook/musicgen-medium", "facebook/musicgen-small",
"facebook/musicgen-large"],
label="Model", value="facebook/musicgen-melody", interactive=True)
with gr.Row():
decoder = gr.Radio(["Default", "MultiBand_Diffusion"],
label="Decoder", value="Default", interactive=True)
with gr.Row():
duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True)
with gr.Row():
topk = gr.Number(label="Top-k", value=250, interactive=True)
topp = gr.Number(label="Top-p", value=0, interactive=True)
temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
with gr.Column():
output = gr.Video(label="Generated Music")
audio_output = gr.Audio(label="Generated Music (wav)", type='filepath')
diffusion_output = gr.Video(label="MultiBand Diffusion Decoder")
audio_diffusion = gr.Audio(label="MultiBand Diffusion Decoder (wav)", type='filepath')
submit.click(toggle_diffusion, decoder, [diffusion_output, audio_diffusion], queue=False,
show_progress=False).then(predict_full, inputs=[model, decoder, text, melody, duration, topk, topp,
temperature, cfg_coef],
outputs=[output, audio_output, diffusion_output, audio_diffusion])
radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
gr.Examples(
fn=predict_full,
examples=[
[
"An 80s driving pop song with heavy drums and synth pads in the background",
"./assets/bach.mp3",
"facebook/musicgen-melody",
"Default"
],
[
"A cheerful country song with acoustic guitars",
"./assets/bolero_ravel.mp3",
"facebook/musicgen-melody",
"Default"
],
[
"90s rock song with electric guitar and heavy drums",
None,
"facebook/musicgen-medium",
"Default"
],
[
"a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
"./assets/bach.mp3",
"facebook/musicgen-melody",
"Default"
],
[
"lofi slow bpm electro chill with organic samples",
None,
"facebook/musicgen-medium",
"Default"
],
[
"Punk rock with loud drum and power guitar",
None,
"facebook/musicgen-medium",
"MultiBand_Diffusion"
],
],
inputs=[text, melody, model, decoder],
outputs=[output]
)
gr.Markdown(
"""
### More details
The model will generate a short music extract based on the description you provided.
The model can generate up to 30 seconds of audio in one pass. It is now possible
to extend the generation by feeding back the end of the previous chunk of audio.
This can take a long time, and the model might lose consistency. The model might also
decide at arbitrary positions that the song ends.
**WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min).
An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds
are generated each time.
We present 4 model variations:
            1. facebook/musicgen-melody -- a music generation model capable of generating music conditioned
on text and melody inputs. **Note**, you can also use text only.
2. facebook/musicgen-small -- a 300M transformer decoder conditioned on text only.
3. facebook/musicgen-medium -- a 1.5B transformer decoder conditioned on text only.
4. facebook/musicgen-large -- a 3.3B transformer decoder conditioned on text only.
            We also present two ways of decoding the audio tokens:
            1. Use the default GAN-based compression model
            2. Use MultiBand Diffusion (see the MultiBand Diffusion paper for details)
When using `facebook/musicgen-melody`, you can optionally provide a reference audio from
which a broad melody will be extracted. The model will then try to follow both
the description and melody provided.
You can also use your own GPU or a Google Colab by following the instructions on our repo.
See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
for more details.
"""
)
interface.queue().launch(**launch_kwargs)
def ui_batched(launch_kwargs):
with gr.Blocks() as demo:
gr.Markdown(
"""
# MusicGen
This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
a simple and controllable model for music generation
presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284).
<br/>
<a href="https://huggingface.co/spaces/facebook/MusicGen?duplicate=true"
style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
<img style="margin-bottom: 0em;display: inline;margin-top: -.25em;"
src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
for longer sequences, more control and no queue.</p>
"""
)
with gr.Row():
with gr.Column():
with gr.Row():
text = gr.Text(label="Describe your music", lines=2, interactive=True)
with gr.Column():
radio = gr.Radio(["file", "mic"], value="file",
label="Condition on a melody (optional) File or Mic")
melody = gr.Audio(source="upload", type="numpy", label="File",
interactive=True, elem_id="melody-input")
with gr.Row():
submit = gr.Button("Generate")
with gr.Column():
output = gr.Video(label="Generated Music")
audio_output = gr.Audio(label="Generated Music (wav)", type='filepath')
submit.click(predict_batched, inputs=[text, melody],
outputs=[output, audio_output], batch=True, max_batch_size=MAX_BATCH_SIZE)
radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
gr.Examples(
fn=predict_batched,
examples=[
[
"An 80s driving pop song with heavy drums and synth pads in the background",
"./assets/bach.mp3",
],
[
"A cheerful country song with acoustic guitars",
"./assets/bolero_ravel.mp3",
],
[
"90s rock song with electric guitar and heavy drums",
None,
],
[
"a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
"./assets/bach.mp3",
],
[
"lofi slow bpm electro chill with organic samples",
None,
],
],
inputs=[text, melody],
outputs=[output]
)
gr.Markdown("""
### More details
        The model will generate 15 seconds of audio based on the description you provided.
You can optionally provide a reference audio from which a broad melody will be extracted.
The model will then try to follow both the description and melody provided.
All samples are generated with the `melody` model.
You can also use your own GPU or a Google Colab by following the instructions on our repo.
See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
for more details.
""")
demo.queue(max_size=8 * 4).launch(**launch_kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--listen',
type=str,
default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
help='IP to listen on for connections to Gradio',
)
parser.add_argument(
'--username', type=str, default='', help='Username for authentication'
)
parser.add_argument(
'--password', type=str, default='', help='Password for authentication'
)
parser.add_argument(
'--server_port',
type=int,
default=0,
help='Port to run the server listener on',
)
parser.add_argument(
'--inbrowser', action='store_true', help='Open in browser'
)
parser.add_argument(
'--share', action='store_true', help='Share the gradio UI'
)
args = parser.parse_args()
launch_kwargs = {}
launch_kwargs['server_name'] = args.listen
if args.username and args.password:
launch_kwargs['auth'] = (args.username, args.password)
if args.server_port:
launch_kwargs['server_port'] = args.server_port
if args.inbrowser:
launch_kwargs['inbrowser'] = args.inbrowser
if args.share:
launch_kwargs['share'] = args.share
# Show the interface
if IS_BATCHED:
global USE_DIFFUSION
USE_DIFFUSION = False
ui_batched(launch_kwargs)
else:
ui_full(launch_kwargs)
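

# --- Illustrative invocations (a sketch, not part of the app) ---
# Run the full UI locally and open it in a browser tab:
#   python demos/musicgen_app.py --inbrowser
# Expose the UI on the network on a fixed port with basic authentication:
#   python demos/musicgen_app.py --listen 0.0.0.0 --server_port 7860 \
#       --username me --password secret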
| audiocraft-main | demos/musicgen_app.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
extras_require = {
"visualization": ["graphviz"],
"tests": ["cytoolz", "pytest", "pytest-cov"],
}
extras_require["complete"] = sorted(set(sum(extras_require.values(), [])))
setup(
name="dagger",
version="0.1.0",
install_requires=["dill", "dask"],
packages=find_packages(),
extras_require=extras_require,
)
| dagger-master | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .experiment import (
Experiment,
ExperimentState,
ExperimentStatePromise,
Function,
Recipe,
)
| dagger-master | dagger/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import json
import logging
import pathlib
import pickle
import uuid
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
from glob import glob
import dask
import dill
from .static import StaticExperimentTree
logger = logging.getLogger("dagger")
class Experiment:
"""
An Experiment object retains state for all the ExperimentStates and
subsequent transitions that are governed by Recipe objects. This is used
by dask to store and then execute the many computation paths that give
origin to the various experiment states in the experiment.
Attributes:
directory (str | pathlib.Path): The directory all the stages of the
experiment will be stored in.
leaves (Dict[uuid.UUID, dask.Delayed]): (internal) Stores the final
states for leaf nodes in the Experiment graph. Dask will traverse
until the leaf nodes are computed, in the case where we lazily
define the graph.
nodes (Dict[uuid.UUID, dask.Delayed]): (internal) Stores the final
states for all nodes in the Experiment graph.
root (ExperimentState): The (never changing) root of the
Experiment graph.
state_class: python class of which all experiment states in this
experiment are instances.
experiment_arguments (dict): information and properties that
define the experiment and its root.
        graph (StaticExperimentTree): bookkeeping object to access all
topologically-sorted nodes in the experiment.
tags (list): tags associated with experiment states in this experiment
"""
def __init__(self, directory, state_class):
"""Create a new Experiment object.
An Experiment provides a sort-of anchor for a DAG of experimental steps.
To create an Experiment, you must provide a directory, which will be the
location for the Experiment to deposit a file for each state in the
experiment graph.
Args:
directory (str | pathlib.Path): Path to a directory (will be
created) which will be the root for all experment state
transitions to be stored.
state_class: python class of which all experiment states in this
experiment are instances.
"""
self.nodes = {}
self.leaves = {}
self.root = None
self.directory = pathlib.Path(directory).absolute()
self.directory.mkdir(exist_ok=True)
self.state_class = state_class
self.experiment_arguments = None
# The graph is only built up when loading an already-run graph
self.graph = None
# This is a tag manager
self.tags = []
@contextmanager
def tag(self, tag):
"""Context manager that associates each state defined in this context
with the `tag` passed in as an argument. Extends the experiment's tags
list while in the context.
"""
if isinstance(tag, str):
tag = [tag]
original_tags = self.tags[:]
self.tags.extend(tag)
yield
self.tags = original_tags
def spawn_new_tree(self, **experiment_arguments):
"""
Create the initial experiment state to anchor off of the current
Experiment object.
Args:
experiment_arguments (dict): information and properties that
define the experiment and its root.
Returns:
ExperimentStatePromise: The initial state represented as a
promise, so not yet computed or realized.
Raises:
RuntimeError: If the Experiment object already has a root
ExperimentState.
"""
if self.root:
raise RuntimeError(
"This Experiment object already has a root ExperimentState!"
)
self.experiment_arguments = experiment_arguments
# Here, we begin defining what should be contained in an
# ExperimentState. The main thing to note here is that an
# ExperimentState is initialized with a parent_sha (an identifier of
# its progenitor state) and the experiment object itself. This is
# critical so every ExperimentState in an Experiment knows where it
# comes from and can find its root.
initial_state = self.state_class(
parent_sha=None, experiment_object=self
)
initial_state.root_state = True
for prop in getattr(initial_state.__class__, "PROPERTIES", []):
# Will raise a KeyError if not passed in
if prop not in experiment_arguments:
raise KeyError(
"State property {} not in found".format(prop)
+ "in `experiment_arguments`"
)
setattr(initial_state, prop, experiment_arguments[prop])
# Needs to be implemented when subclassing ExperimentState
initial_state.initialize_state(**experiment_arguments)
if not initial_state.restore():
logger.info("Root state unsaved - saving now")
initial_state.save()
self.root = initial_state
del initial_state
# Turn the root into a state promise to attach to the dask tree
return ExperimentStatePromise(
promise=self.root,
id=uuid.uuid4(),
previous_id=None,
experiment_object=self,
)
def save(self):
"""Save minimal information needed to start reconstructing the
experiment, i.e. how to locate the root, as well as other basic info
about the experiment parameters. These are saved in human-readable
json in <directory>/experiment.json
"""
json.dump(
{
"root": self.root.sha(),
"experiment_arguments": self.experiment_arguments,
},
open(self.directory / "experiment.json", "w"),
indent=4,
)
@classmethod
def restore(cls, directory, state_class=None, slim=True):
"""Restoring an experiment means reinstantiating an Experiment
object with all correct attributes previously stored in the `save`
phase using json. This will load the slim version of all experiments
found in the `directory`, and use their slim info (found in the
<sha>.slim.json files in the `directory`) to reconstruct the `edge_map`
and `node_map`, thus reconstructing the whole experimental tree
structure and storing it in a `StaticExperimentTree` for analysis.
Args:
directory (str | pathlib.Path): Path to a directory (will be
created) which will be the root for all experment state
transitions to be stored.
state_class: python class of which all experiment states in this
experiment are instances.
            slim (bool): whether to load the ExperimentStates in slim format
when restoring the experiment (True) or in full format (False)
Returns:
The loaded Experiment corresponding to all states found in the
specified `directory`.
"""
state_class = state_class or ExperimentState
directory = pathlib.Path(directory)
experiment = cls(directory, state_class)
experiment.__dict__.update(
json.load(open(directory / "experiment.json", "r"))
)
# .root is first loaded in as a string sha.
experiment.root = state_class.load(
experiment.root, experiment=experiment, slim=slim
)
# Get all experiments by looking for all .slim.json files in the
# directory
all_shas = [
pathlib.Path(p).name.replace(".slim.json", "")
for p in glob(f"{directory / '*.slim.json'}")
]
# Reconstruct the experiment tree by connecting all nodes with edges
# using the parent and children info contained in each state
edge_map = defaultdict(set)
node_map = dict()
for sha in all_shas:
if sha != experiment.root.sha():
state = state_class.load(
sha=sha, experiment=experiment, slim=slim
)
else:
state = experiment.root
node_map[sha] = state
if state.parent_sha:
edge_map[state.parent_sha].add(sha)
# Run a topological sort so we can filter states by distance from the
# root
toposort = defaultdict(set)
queue = {experiment.root.sha()}
depth = 0
while queue:
new_queue = set()
for node in queue:
toposort[depth].add(node)
for child_node in edge_map[node]:
new_queue.add(child_node)
depth += 1
queue = new_queue
# Make sure we can traverse the graph sensically
while depth > 0:
for node in map(node_map.get, toposort.get(depth, [])):
if node:
parent = node_map.get(node.parent_sha)
if not hasattr(parent, "children"):
parent.children = {node}
else:
parent.children.add(node)
node.parent = parent
depth -= 1
experiment.root.parent = None
# Represent the set of connected nodes and edges as
# a StaticExperimentTree
experiment.graph = StaticExperimentTree(node_map, edge_map, toposort)
return experiment
def run(self, scheduler="single-threaded"):
"""Run the Experiment defined by the graph using dask.
Args:
scheduler (str): How dask should schedule the nodes in the graph
(see the dask documentation for more information).
"""
self.save() # save basic experiment info to json
_ = dask.compute(self.leaves, scheduler=scheduler)
        # When dask goes through the tree, it knows the full sequence of ops
# needed to compute each leaf, so this gives dask full authority in
# determining the best dispatch path.
class ExperimentStatePromise:
"""An ExperimentStatePromise is a construct to allow for a lazy graph.
Specifically, an ExperimentStatePromise encapsulates a lazy-evaluated
function that represents the transition from state to state.
Attributes:
promise (object): The object to be lazily acted upon.
id (uuid.UUID): Unique ID of this experiment state promise.
previous_id (uuid.UUID): ID of the state which directly feeds into the
next state defined by the realization of the promise.
experiment_object (Experiment): The anchor Experiment for this promise.
"""
def __init__(self, promise, id, previous_id, experiment_object):
"""Creates a new promose from an existing object.
Args:
promise (object): The object to be lazily acted upon.
id (uuid.UUID): Unique ID of this experiment state promise.
previous_id (uuid.UUID): ID of the state which directly feeds
into the next state defined by the realization of the promise.
experiment_object (Experiment): The anchor Experiment for
this promise.
"""
self.promise = promise
self.id = id
self.previous_id = previous_id
self.experiment_object = experiment_object
def promise_from_callable(self, fn):
"""Defines the function which is to act on the promise which evolves
the object from state A to state B.
Args:
fn (Callable): Function that takes as input an object of whatever
type the promise is and returns a python object.
Returns:
ExperimentStatePromise
"""
return ExperimentStatePromise(
promise=dask.delayed(fn)(self.promise),
id=uuid.uuid4(),
previous_id=self.id,
experiment_object=self.experiment_object,
)
def get(self, scheduler="single-threaded"):
"""Evaluate the graph to turn the current state from a promise into
a computed experiment state.
Args:
scheduler (str): How dask should schedule the nodes in the graph
(see the dask documentation for more information).
"""
# this is for all the lazily evaluated states
if hasattr(self.promise, "compute"):
return self.promise.compute(scheduler=scheduler)
# if a state is already materialized
else:
return self.promise
class ExperimentState:
"""An ExperimentState represents a point in time with the evolution of an
experiment. An ExperimentState can have any number of objects attached to
it as attributes. Importantly, we must have a consistent way to save a
state such that it can be reloaded if an identical experiment is to be
run (cacheing).
Attributes:
experiment_object (Experiment): The anchor experiment for the state.
        parent_sha (str): sha identifying the saved parent state, used to
            locate it on disk. None if this is the root.
        from_cache (bool): whether this state was restored from a cached
            file on disk instead of being recomputed.
        root_state (bool): whether this experiment state is the root.
        save_pre_hooks (list): callables run just before the state is
            serialized in `save`.
        save_post_hooks (list): callables run just after the state is
            serialized in `save`.
        load_post_hooks (list): callables run just after the state is
            deserialized in `load`.
recipe (Recipe): recipe that generated the current state. None if
this is the root.
slim_loaded (bool): whether the state is only loaded in its slim
version (True), or in its full version (False)
slim_sha (str): sha generated to uniquely identify this state
tags (list): strings that identify the state in the graph
parent (ExperimentState): state containing the direct parent of the
current ExperimentState. None if this is the root.
children (set): ExperimentState objects that are directly derived
from the current state
root (ExperimentState): state containing the root of the experiment the
current ExperimentState belongs to.
directory (PosixPath): directory where the experiment is stored on
disk
path (PosixPath): path to the current ExperimentState on disk
"""
    PROPERTIES = []  # participate in the state's sha (hashed)
    NONHASHED_ATTRIBUTES = []  # carried on the state but excluded from the sha
def __init__(self, parent_sha=None, experiment_object=None):
self.parent_sha = parent_sha
self.experiment_object = experiment_object
self.from_cache = False
self.root_state = False
# Set up pre- and post-hooks for saving and loading.
self.save_pre_hooks = []
self.save_post_hooks = []
        # N.B., the concept of a load-pre-hook doesn't really make sense.
self.load_post_hooks = []
# Represent the recipe that describes the state transition leading
# to the current state. This allows us to cache *just* the nodes.
self._recipe = None
# Identify whether this is a slim-loaded state.
self.slim_loaded = False
self.slim_sha = None
# Keep a set of tags which help you identify the node in the graph
self.tags = []
# Initialize all properties to None
for prop in self.PROPERTIES + self.NONHASHED_ATTRIBUTES:
setattr(self, prop, None)
self.parent = None
self.children = set()
# If the experiment state has a parent, the following attributes will
# be inherited. These are shared across all states that stem from the
# same root
if self.parent_sha is not None:
parent = self.__class__.load(self.parent_sha, experiment_object)
for property_name in (
self.__class__.PROPERTIES
+ self.__class__.NONHASHED_ATTRIBUTES
):
value = getattr(parent, property_name, None)
setattr(self, property_name, value)
# remove parent -- no longer needed
parent.deflate()
del parent
def __repr__(self):
return (
f"{self.__class__.__name__}(sha={self.sha()}, tags={self.tags})"
)
@property
def recipe(self):
return self._recipe
@property
def root(self):
if self.experiment_object is not None:
return self.experiment_object.root
def sha(self):
"""Computes the unique hash value associated with this state"""
if self.slim_loaded:
return self.slim_sha
obj_keys = [
"parent_sha",
"_recipe",
"root_state",
"tags",
] + self.PROPERTIES
def marshal_obj(obj):
if isinstance(obj, str):
return obj
if isinstance(obj, pathlib.Path):
return str(obj)
if isinstance(obj, dict):
return str(
sorted(
list(
(marshal_obj(k), marshal_obj(v))
for k, v in obj.items()
)
)
)
if isinstance(obj, Recipe):
return str(
sorted(
list(
(
marshal_obj(prop),
marshal_obj(getattr(obj, prop, None)),
)
for prop in getattr(obj, "PROPERTIES", [])
)
)
)
if isinstance(obj, (list, tuple)):
return str(list(marshal_obj(o) for o in obj))
if isinstance(obj, set):
return str(sorted(list(marshal_obj(o) for o in obj)))
if isinstance(obj, int):
return str(obj)
if isinstance(obj, float):
return f"{obj:.8f}"
return pickle.dumps(obj)
obj = {
prop: marshal_obj(getattr(self, prop, None)) for prop in obj_keys
}
representation = str(sorted(list(obj.items())))
h = hashlib.md5()
h.update(representation.encode())
return h.hexdigest() + ("-root" if self.root_state else "")
def __getstate__(self):
"""
The getstate hook is invoked by dill and pickle - we don't want to
serialize the experiment object!
"""
o = dict(self.__dict__)
del o["experiment_object"]
return o
def __setstate__(self, s):
"""
The setstate hook is invoked by dill and pickle - we don't want to
serialize the experiment object!
"""
self.__dict__ = s
self.experiment_object = None
def restore(self):
"""Fully load a state back into memory.
Returns:
True, if the state has been inflated back in memory in its full
version; False, if the state wasn't found and wasn't loaded back
up correctly.
"""
path = self.path
parent = self.parent
if not hasattr(self, "children"):
self.children = set()
children = set(self.children)
self.slim_loaded = False
self.slim_sha = None
try:
experiment_object = self.experiment_object
# Reset exp object because it gets deleted from state before
# serialization
self.__dict__ = self.__class__.load(
path, experiment_object
).__dict__
self.from_cache = True
self.experiment_object = experiment_object
self.parent = parent
self.children = children
return True
except FileNotFoundError:
logger.info(f"No cached state at: {path}")
return False
def deflate(self):
"""Reduce the state back to its "slim" version by deleting all its
attributes that are not in ["parent_sha", "slim_sha", "slim_loaded",
"tags", "experiment_object", "parent", "children"]. This helps reduce
the memory footprint.
Returns:
            True at the end of all deflation operations
"""
sha = self.sha()
for slot in list(self.__dict__.keys()):
if slot not in {
"parent_sha",
"slim_sha",
"slim_loaded",
"tags",
"experiment_object",
"parent",
"children",
}:
o = getattr(self, slot)
delattr(self, slot)
del o
self.slim_loaded = True
self.slim_sha = sha
return True
@contextmanager
def lazy_load(self):
"""Returns the restored version of the state and automatically
handles deflating it when no longer in scope.
"""
try:
self.restore()
yield
finally:
self.deflate()
@property
def directory(self):
base = "./"
if self.experiment_object:
base = self.experiment_object.directory
return pathlib.Path(base).absolute()
@property
def path(self):
"""Path of this state on disk, obrained from the Experiment's
directory and the state's sha."""
return self.directory / self.sha()
def new_state(self, recipe=None):
state = self.__class__(
parent_sha=self.sha(), experiment_object=self.experiment_object
)
state._recipe = recipe
# TODO: should we add state to self.children??
# TODO: should we add self to state.parent??
return state
def save(self):
"""Serializes the state by dumping the json version of it after
executing all saving pre-hooks and hooks. A slim representation of the
state will also be saved by appending .slim.json to the file name for
quick experiment reconstruction and state reloading.
"""
path = self.path
sha = self.sha()
logger.debug(f"Saving to: {path}")
for hook in self.save_pre_hooks:
hook()
if hasattr(self, "save_hook") and callable(
getattr(self, "save_hook")
):
logger.debug("using custom save_hook")
getattr(self, "save_hook")(path)
else:
dill.dump(self, open(path, "wb"))
for hook in self.save_post_hooks:
hook()
slim_repr = {
"parent_sha": self.parent_sha,
"slim_sha": sha,
"slim_loaded": True,
"tags": self.tags,
}
json.dump(slim_repr, open(f"{path}.slim.json", "w"), indent=4)
@classmethod
def load(cls, sha, experiment=None, slim=False):
"""Reloads the state identified by `sha` into memory, either in its
slim format (slim=True) or its full format after executing loading
hooks (slim=False).
Args:
sha (str): sha generated to uniquely identify this state
experiment (Experiment): object that contains this node
            slim (bool): whether to load the ExperimentState in slim format
(True) or in full format (False)
Returns:
A reloaded ExperimentState
"""
if isinstance(experiment, (str, pathlib.Path)):
experiment_object = Experiment(
pathlib.Path(experiment).absolute(), cls
)
else:
experiment_object = experiment or Experiment(
pathlib.Path(".").absolute(), cls
)
path = pathlib.Path(experiment_object.directory) / sha
if slim:
state_dict = json.load(open(f"{path}.slim.json", "r"))
state = cls.__new__(cls)
state.__setstate__(state_dict)
else:
if hasattr(cls, "load_hook") and callable(
getattr(cls, "load_hook")
):
logger.debug("using custom load_hook")
state = getattr(cls, "load_hook")(path)
else:
state = dill.load(open(path, "rb"))
for hook in state.load_post_hooks:
hook()
state.experiment_object = experiment_object
return state
def to_promise(self):
"""Turn the ExperimentState into an ExperimentStatePromise to be able
to add another node to modify the graph on the fly.
Returns:
An ExperimentStatePromise corresponding to the current
ExperimentState.
"""
return ExperimentStatePromise(
promise=self,
id=uuid.uuid4(),
previous_id=None,
experiment_object=self.experiment_object,
)
class Recipe:
"""A Recipe represents a sequence of actions that modify an
ExperimentState. Subclass it and implement the `run` method to define
how the Recipe transforms an experiment state into another one.
"""
def __call__(self, experiment_state):
"""This is what adds the ops defined by the recipe to the graph
"""
new_state = experiment_state.promise_from_callable(
partial(
self.run_recipe,
# We want to "dereference" the tag list, as we're not
# guaranteed execution order and byref will lead to race
# conditions in execution plan.
tags=experiment_state.experiment_object.tags[:],
)
)
# If the previous state (`experiment_state`) was a leaf, but we now
# created a new state stemming from it, then the previous state can
# be removed from the set of leaves.
if experiment_state.id in experiment_state.experiment_object.leaves:
del experiment_state.experiment_object.leaves[experiment_state.id]
# The new state can now be added to the leaves
new_state.experiment_object.leaves[new_state.id] = new_state.promise
# Add it to the nodes as well
new_state.experiment_object.nodes[new_state.id] = new_state.promise
return new_state
def run_recipe(self, prev_state, tags=None):
"""Give birth to a new state from the state upon which the recipe is
acting, then modify this new state according to the instructions in
the recipe.
"""
new_state = prev_state.new_state(self)
new_state.tags = tags
prev_state.deflate()
new_state.restore()
# If we aren't in a cached state, actually do the work.
if not new_state.from_cache:
new_state = self.run(new_state)
if not isinstance(new_state, ExperimentState):
raise RuntimeError(
f"run() method missing valid return type. "
f"Found {type(new_state)}, expected ExperimentState"
)
new_state.save()
new_state.deflate()
return new_state
class Function:
"""Gives the ability to execute any function on an experiment state
without modifying the graph experiment states and adding new nodes. This
is intended for evaluation and analysis functions that do no cause a state
to logically transition into a new modified state. These functions should
not be modifying the state in any way, they should simply be probing it.
"""
def __init__(self, op):
self.op = op
def __call__(self, experiment_state):
"""This is what adds the ops defined by the recipe to the graph
"""
promise = dask.delayed(self._safe_op)(experiment_state.promise)
# If the previous state (`experiment_state`) was a leaf, but we now
# created a new state stemming from it, then the previous state can
# be removed from the set of leaves.
if experiment_state.id in experiment_state.experiment_object.leaves:
del experiment_state.experiment_object.leaves[experiment_state.id]
# The new state can now be added to the leaves
experiment_state.experiment_object.leaves[uuid.uuid4()] = promise
return experiment_state
def _safe_op(self, state):
state.restore()
try:
self.op(state)
finally:
state.deflate()
# Makes decorator-style prettier
function = Function
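

# --- Illustrative usage (a minimal sketch, not part of the library) ---
# The demo below shows how the pieces above are typically wired together:
# subclass ExperimentState and Recipe, spawn a root state, chain a recipe,
# probe the result with a Function, and run the resulting graph. The names
# `DemoState`, `AddRecipe`, `a`, `result` and the directory are made up
# purely for illustration.
if __name__ == "__main__":

    class DemoState(ExperimentState):
        PROPERTIES = ["a"]
        NONHASHED_ATTRIBUTES = ["result"]

        def initialize_state(self, **kwargs):
            pass

    class AddRecipe(Recipe):
        PROPERTIES = ["x"]

        def __init__(self, x):
            self.x = x

        def run(self, state):
            # Mutate the freshly spawned state and return it.
            state.result = state.a + self.x
            return state

    experiment = Experiment(directory="./demo_experiment", state_class=DemoState)
    root = experiment.spawn_new_tree(a=1)
    with experiment.tag("plus-two"):
        summed = AddRecipe(x=2)(root)
    # Probe the resulting state without adding a new node to the graph.
    Function(lambda state: print("result =", state.result))(summed)
    experiment.run(scheduler="single-threaded")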
| dagger-master | dagger/experiment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import fnmatch
from collections import namedtuple
class StaticExperimentTree(
namedtuple("StaticExperimentTree", ["node_map", "edge_map", "toposort"])
):
"""
StaticExperimentTree: Keeps track of objects needed for static graph
analysis of experiments. Conceptually related to a `dagger` `Experiment`,
but *not* used by dask to run the computation. A `StaticExperimentTree` is
simply used for visualization, bookkeeping, and analysis purposes.
node_map: dict that maps each node sha to the correct experiment state
edge_map: dict that maps each node sha to its set of children's shas
toposort: dict of dicts that maps each level in the tree (i.e. distance
from root) to the set of node shas at that level
"""
class NodeSet(list):
"""List of nodes in an experiment, augmented with functionalities for
easy filtering and inspection for analysis purposes.
"""
def filter(self, pattern):
"""Return all experiment states that match the pattern in their
tag.
Args:
pattern (string): substring to look for regular expression
matching
Returns:
NodeSet with the states whose tags match the pattern
"""
negation = pattern.startswith("!")
if negation:
pattern = pattern[1:]
return self.__class__(
node
for node in self
if negation
^ any(fnmatch.fnmatch(tag, pattern) for tag in node.tags)
)
def __or__(self, nodeset):
if not isinstance(nodeset, self.__class__):
raise TypeError(
f"Cannot compose with object of type:{type(nodeset)}"
)
return self.__class__(
{node for ns in [self, nodeset] for node in ns}
)
def __and__(self, nodeset):
if not isinstance(nodeset, self.__class__):
raise TypeError(
f"Cannot compose with object of type:{type(nodeset)}"
)
return self.__class__(set(self).intersection(nodeset))
@property
def iterator(self):
"""Iterator that takes care of restoring and deflating states as
they get accessed to reduce memory footprint to a minimum while
looping through states.
"""
for node in self:
with node.lazy_load():
yield node
@property
def nodes(self):
return self.__class__.NodeSet(self.node_map.values())
@property
def root(self):
topo_zero = list(self.toposort[0])
if len(topo_zero) != 1:
raise RuntimeError(
"Invalid graph - found more than one 'root' per toposort"
)
return self.node_map[topo_zero[0]]
def node(self, sha):
"""Access an experiment state by specifying its hash value.
Args:
sha (str): hash that identifies the state that we want to access
Returns:
ExperimentState corresponding to specified sha value.
"""
return self.node_map[sha]
def nodes_at_distance(self, distance):
"""Access all experiment states in an experiment that are at a
specific distance from the root state in the experimental tree.
Args:
distance (int): depth in the tree from the root to slice the tree
at.
Returns:
NodeSet with all states at that distance from the root in the
experimental tree.
"""
if distance not in self.toposort:
return self.__class__.NodeSet([])
return self.__class__.NodeSet(
{self.node_map[sha] for sha in self.toposort[distance]}
)
def to_graphviz(self, node_args=None):
"""Constructs a graphviz visual graph of the experiment tree for easy
visual inspection. Each state is connected by directed arrows to its
children that were created by acting with a Recipe on the state
itself. The default appearance will display each node's hash value,
level, and tags. The appearance of each node can be modified by
passing in `node_args`.
Returns:
A graphviz.Digraph of the StaticExperimentTree
"""
from graphviz import Digraph
dot = Digraph(comment="Experiment Graph")
node_args = node_args or {}
for distance, levelset in self.toposort.items():
distance = f"level=<FONT COLOR='magenta'>{distance}</FONT>"
for sha in levelset:
node = self.node(sha)
tags = ""
if node.tags:
tags = [
f"<FONT COLOR='red'>'{tag}'</FONT>"
for tag in node.tags
]
tags = f", tags={', '.join(tags)}"
dot.node(
sha,
(
f"<<FONT COLOR='blue'>{sha}</FONT> "
f"({distance}{tags})>"
),
shape="rect",
fontname="menlo",
**node_args,
)
for parent, children in self.edge_map.items():
for child in children:
dot.edge(parent, child)
return dot
def draw(self, filename="graph", format="pdf", view=False):
"""Draw the graph of the StaticExperimentTree and save it to disk at
the specified location. If possible and `view` is set to True,
display it.
"""
d = self.to_graphviz()
def _is_notebook():
"""Identify whether the graph should be visualized in a jupyter
notebook.
"""
import os
env = os.environ
shell = "shell"
program = os.path.basename(env["_"])
if "jupyter" in program:
return True
if "JPY_PARENT_PID" in env:
return True
return False
if view and _is_notebook():
d.render(
filename=filename, format=format, view=False, cleanup=True
)
from IPython.display import Image
return Image(filename + "." + format)
else:
return d.render(
filename=filename, format=format, view=view, cleanup=True
)
| dagger-master | dagger/static.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
import random
from operator import add, mul
import pytest
from dask.delayed import Delayed
from dagger import (
Experiment,
ExperimentState,
ExperimentStatePromise,
Function,
Recipe,
)
class EmptyState(ExperimentState):
PROPERTIES = ["a", "B"]
NONHASHED_ATTRIBUTES = ["c"]
def initialize_state(self, **kwargs):
pass
class OpRecipe(Recipe):
PROPERTIES = ["op", "x", "stochastic"]
def __init__(self, op, x, stochastic=False):
self.op = op
self.x = x
self.stochastic = stochastic
def run(self, s):
s.c = self.op(s.a, self.x)
if self.stochastic:
s.c *= random.gauss(0, 1)
return s
class TestExperimentState:
def test_state_init(self):
"""Test that at initialization all attributes are None or empty"""
s = EmptyState()
assert s.parent is None
assert s.parent_sha is None
assert not s.children
assert s.experiment_object is None
assert not s.from_cache
assert not s.root_state
assert s.recipe is None
assert hasattr(s, "a") and s.a is None
assert hasattr(s, "B") and s.B is None
assert hasattr(s, "c") and s.c is None
def test_state_sha_identical(self):
"""Two identical states should have the same hash"""
s = EmptyState()
r = EmptyState()
assert r.sha() == s.sha()
def test_state_sha_indep_exp(self, tmpdir):
"""Test that the sha is independent of which experiment the state
belongs to, and it only captures the nature of the state itself.
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s_lone = EmptyState(experiment_object=None)
s = EmptyState(experiment_object=exp)
assert s_lone.sha() == s.sha()
def test_state_exp(self, tmpdir):
"""Test that the experiment object is correctly assigned to the state
when specified, but is removed when calling __getstate__ (used for
serialization) and is set to None by __setstate___.
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s = EmptyState(experiment_object=exp)
assert "experiment_object" in s.__dict__
assert s.experiment_object == exp
assert "experiment_object" not in s.__getstate__()
# Resetting the state from another state should set the experiment to
# None because the experiment may either not be instantiated if states
# are being reloaded from disk, or, in general, the copy of a state
# may not logically belong to the same experiment as the original one.
s.__setstate__(s.__dict__)
assert "experiment_object" in s.__dict__
assert s.experiment_object is None
def test_state_root_inheritance(self, tmpdir):
"""Test that a state that is instatiated within an experiment that
has a root will correctly inherit the pointer to the root to be able
to refer back to it when needed (e.g. for weight resetting).
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
root = EmptyState(experiment_object=exp)
exp.root = root # manually set it to the root of the experiment
# Ideally this should also be set to True, but this is not used here
# and root setting should never happen manually anyways, so this is
# internally handled when a new tree is spawned.
# root.root_state = True
s = EmptyState(experiment_object=exp)
assert s.root == root
def test_state_save(self, tmpdir):
"""Test that a state created within an experiment inherits the right
directory and gets saved in there upon calling the `save` method. This
should generate a pickle file with the state and a json file with the
state info for slim reloading.
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s = EmptyState(experiment_object=exp) # assign within exp
assert s.directory == tmpdir
s.save() # save state to disk
assert os.path.isfile(os.path.join(tmpdir, s.sha()))
assert os.path.isfile(os.path.join(tmpdir, s.sha() + ".slim.json"))
def test_state_restore(self, tmpdir):
# Create state and assign some properties
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s = EmptyState(experiment_object=exp)
s.a = 5
s.B = "hello"
s.c = 12345
# Save it to disk
s.save()
assert not s.from_cache
# Deflate it to remove all properties and put it in "slim" state
s.deflate()
assert not hasattr(s, "a")
assert not hasattr(s, "B")
assert not hasattr(s, "c")
# Restore it from disk to demonstrate that it was saved correctly and
# we are able to recover the properties
s.restore()
assert s.a == 5
assert s.B == "hello"
assert s.c == 12345
assert s.from_cache
def test_save_hooks(self, tmpdir):
"""Test that save pre- and post-hooks are working correctly.
Note: this should be done with the property `c` which is part of the
NONHASHED_ATTRIBUTES. If we modified `a` or `B`, the hash would
change because they contribute to its value, and restoring would fail
because no corresponding state is found on disk. See below for test of
that behavior.
"""
from types import MethodType
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s = EmptyState(experiment_object=exp)
# Set c to 5 to begin with
s.c = 5
def change_c(self, value):
self.c = value
s.change_c = MethodType(change_c, s)
# Before saving, set c to 6
s.save_pre_hooks = [lambda: s.change_c(6)]
# After saving, set c to 7
s.save_post_hooks = [lambda: s.change_c(7)]
s.save()
# We are after saving, so c should be 7
assert s.c == 7
# Deflate and reload the state from disk. The saved version of the
# state should have the value of c that was set before saving, i.e. 6
s.deflate()
s.restore()
assert s.c == 6
def test_state_sha_save_hook(self, tmpdir):
"""When restoring fails, it returns False. Check that the save post
hook sets the new value correctly, thus modifying the hash value of
the state when the property that gets modified is part of PROPERTIES.
"""
from types import MethodType
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s = EmptyState(experiment_object=exp)
# Set a to 5 to begin with
s.a = 5
def change_a(self, value):
self.a = value
s.change_a = MethodType(change_a, s)
# After saving, set a to 7
s.save_post_hooks = [lambda: s.change_a(7)]
s.save()
s.deflate()
assert not s.restore()
def test_new_state(self, tmpdir):
"""Generating a new state from a previous one using new_state should
generate the right connection between the two states, which can be
inspected through the setting of a parent_sha and then in the way
the experiment graph is drawn when the StaticExperimentTree is
reloaded.
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s = EmptyState(experiment_object=exp)
# Set it as the root of the experiment
exp.root = s
s.save()
# Generate a new child state from state `s`
r = EmptyState.new_state(s)
assert r.parent_sha == s.sha()
assert r.experiment_object == exp
r.save()
exp.save()
exp = Experiment.restore(directory=tmpdir) # reload experiment
# Test that the graph looks as expected with the connection
assert len(exp.graph.nodes) == 2
assert exp.graph.edge_map[s.sha()] == set([r.sha()])
def test_state_lazy_load(self, tmpdir):
exp = Experiment(directory=tmpdir, state_class=EmptyState)
s = EmptyState(experiment_object=exp)
exp.root = s
s.save()
# Generate a new child state from state `s`
r = EmptyState.new_state(s)
r.save()
exp.save()
# Reload the two-state experiment
# By default, slim=True
exp = Experiment.restore(directory=tmpdir)
for node, state in exp.graph.node_map.items():
assert state.slim_loaded # deflated
with state.lazy_load():
assert not state.slim_loaded # fully loaded
assert state.slim_loaded # deflated
# Check behavior change as slim is set to False when exp is restored
exp = Experiment.restore(directory=tmpdir, slim=False)
for node, state in exp.graph.node_map.items():
assert not state.slim_loaded # deflated
with state.lazy_load():
assert not state.slim_loaded # fully loaded
# Note: lazy_load deflates the state even if it was initially
# fully loaded!
assert state.slim_loaded # deflated
class TestExperiment:
def test_experiment_init(self, tmpdir):
"""Test that the conditions we expect after the initialization of an
Experiment are met."""
exp = Experiment(directory=tmpdir, state_class=ExperimentState)
assert os.path.isdir(tmpdir)
assert exp.root is None
def test_experiment_tags(self, tmpdir):
"""Test that the tags context manager is working as designed by
adding tags to the experiment when inside the corresponding with
statement."""
exp = Experiment(directory=tmpdir, state_class=ExperimentState)
assert not exp.tags
with exp.tag("test_tag"):
assert exp.tags == ["test_tag"]
assert not exp.tags
with exp.tag("test_tag"):
with exp.tag("second_tag"):
assert exp.tags == ["test_tag", "second_tag"]
assert exp.tags == ["test_tag"]
def test_spawn_new_tree_error(self, tmpdir):
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {}
with pytest.raises(KeyError):
exp.spawn_new_tree(**exp_args)
def test_spawn_new_tree(self, tmpdir):
"""
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
# The argument 'c' should be ignored, as it is not set from the
# constructor in spawn_new_tree
exp_args = {"a": "first", "B": "second", "c": "shouldnotpropagate"}
root = exp.spawn_new_tree(**exp_args)
assert type(root) == ExperimentStatePromise
root_state = root.get()
assert exp.root is root_state
assert isinstance(root_state, EmptyState)
assert root_state.a == exp_args["a"]
assert root_state.B == exp_args["B"]
assert root_state.c is None
def test_spawn_new_tree_oldroot(self, tmpdir):
# TODO: implement
pass
class TestRecipe:
def test_bad_recipe(self, tmpdir):
# This should fail because run does not return a state!
class Bad(Recipe):
def run(self, s):
return None
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 10.0, "B": "notused"}
root = exp.spawn_new_tree(**exp_args)
with pytest.raises(RuntimeError):
Bad()(root).get()
def test_spawn_new_tree_recipe(self, tmpdir):
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 10.0, "B": "notused"}
root = exp.spawn_new_tree(**exp_args)
# This recipe sets the non-hashed attribute `c`, so we check that we
# do state.c = 10 * 1.5
op = OpRecipe(mul, 1.5)
result = op(root)
out = result.get()
# When we `get` the result, it will be slim-loaded, verify that and
# restore the output.
assert out.slim_loaded
assert out.restore()
assert not out.slim_loaded
# Verify the Op was applied
assert out.c == 15.0
def test_cache_works(self, tmpdir):
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 10.0, "B": "notused"}
root = exp.spawn_new_tree(**exp_args)
# In the stochastic version of this, the output of the Op will change
# if `run` is called again. To make sure we're using the cached nodes,
# we check to make sure multiple runs yield the same output.
op = OpRecipe(mul, 0.4, stochastic=True)
result = op(root)
out = result.get()
assert out.restore()
value = out.c
# Get the value a second time by re-running through the graph, assert
# the same
result2 = op(root)
out2 = result2.get()
assert out2.restore()
assert value == out2.c
# Remove all cached states, rebuild the experiment, and assert the
# value changes.
for f in glob.glob(str(tmpdir / "*")):
os.remove(f)
# Now recreate
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 10.0, "B": "notused"}
root = exp.spawn_new_tree(**exp_args)
result3 = op(root)
out3 = result3.get()
assert out3.restore()
assert value != out3.c
def test_same_recipe(self, tmpdir):
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 10.0, "B": "notused"}
root = exp.spawn_new_tree(**exp_args)
op = OpRecipe(mul, 0.4, stochastic=False)
new_state_a = op(root)
new_state_b = op(root)
new_state_c = op(root)
# make sure this creates three new dask delayed states
assert len(exp.leaves) == 3
for leafID, leaf in exp.leaves.items():
assert type(leaf) == Delayed
# Since this is the same op on the root, these three ops would result
# in 3 identical states. Check, therefore, that only one state is
# created
exp.run()
exp = Experiment.restore(directory=tmpdir, state_class=EmptyState)
assert len(exp.graph.node_map) == 2 # root + new state
# Test actual restoring from cache by hand by replicating what
# happens in `run_recipe`
exp1 = Experiment(directory=tmpdir, state_class=EmptyState)
root1 = exp1.spawn_new_tree(**exp_args)
assert root1.get().from_cache # same root
new_state1 = root1.get().new_state(op)
assert new_state1.restore()
class TestFunction:
def test_function(self, tmpdir):
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 1, "B": 2}
root = exp.spawn_new_tree(**exp_args)
op = OpRecipe(mul, 0.4)
state1 = op(root)
assert len(exp.leaves) == 1
leaf1 = list(exp.leaves.items())[0]
function = Function(lambda s: print("c = {}".format(s.c)))
state2 = function(state1)
# The state should not be modified by the function because functions
# are non-state-mutating operations
assert state2 == state1
# check that the previous leaf has been replaced by the new leaf
assert len(exp.leaves) == 1
leaf2 = list(exp.leaves.items())[0]
assert leaf2 != leaf1
def test_function_exception(self, tmpdir):
"""Test that when the function fails, the relative error gets raised
upon running the graph (not at graph definition time).
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 1, "B": 2}
root = exp.spawn_new_tree(**exp_args)
function = Function(lambda s: print("d = {}".format(s.d)))
s = function(root)
with pytest.raises(AttributeError):
exp.run()
def test_function_safe_op(self, tmpdir):
"""Regardless of whether the op in the function fails or succeeds,
the state it acts on gets deflated.
"""
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 1, "B": 2}
root = exp.spawn_new_tree(**exp_args)
exp.run()
exp = Experiment.restore(directory=tmpdir, state_class=EmptyState)
assert exp.root.slim_loaded
badfunction = Function(lambda s: print("d = {}".format(s.d)))
with pytest.raises(AttributeError):
s = badfunction._safe_op(exp.root)
assert exp.root.slim_loaded
goodfunction = Function(lambda s: print("a = {}".format(s.a)))
s = goodfunction._safe_op(exp.root)
assert exp.root.slim_loaded
class TestStaticExperimentTree:
def test_tag_filtering(self, tmpdir):
exp = Experiment(directory=tmpdir, state_class=EmptyState)
exp_args = {"a": 1.0, "B": 2.0, "c": 3.0}
root = exp.spawn_new_tree(**exp_args)
op_add = OpRecipe(add, 1.2)
with exp.tag("ops"):
with exp.tag("phase:mul"):
x1 = OpRecipe(mul, 0.4)(root)
x2 = OpRecipe(mul, 0.5)(root)
with exp.tag("phase:add"):
y1 = op_add(x1)
y2 = op_add(x2)
exp.run()
exp = Experiment.restore(directory=tmpdir, state_class=EmptyState)
assert len(exp.graph.nodes.filter("op*")) == 4
assert (
len(
exp.graph.nodes.filter("phase:mul")
| exp.graph.nodes.filter("phase:add")
)
== 4
)
assert len(exp.graph.nodes.filter("!phase:mul")) == 3
assert (
len(
exp.graph.nodes.filter("ops")
& exp.graph.nodes.filter("!phase:add")
)
== 2
)
# Cannot compose other objects with a nodeset
with pytest.raises(TypeError):
exp.graph.nodes.filter("phase:mul") | "hi"
with pytest.raises(TypeError):
exp.graph.nodes.filter("phase:*") & "!hi"
| dagger-master | tests/test_dag.py |
import sys
sys.path.insert(0, "Mask2Former")
import tempfile
from pathlib import Path
import numpy as np
import cv2
import cog
# import some common detectron2 utilities
from detectron2.config import CfgNode as CN
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
from detectron2.projects.deeplab import add_deeplab_config
# import Mask2Former project
from mask2former import add_maskformer2_config
class Predictor(cog.Predictor):
def setup(self):
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file("Mask2Former/configs/coco/panoptic-segmentation/swin/maskformer2_swin_large_IN21k_384_bs16_100ep.yaml")
cfg.MODEL.WEIGHTS = 'model_final_f07440.pkl'
cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True
cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = True
cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = True
self.predictor = DefaultPredictor(cfg)
self.coco_metadata = MetadataCatalog.get("coco_2017_val_panoptic")
@cog.input(
"image",
type=Path,
help="Input image for segmentation. Output will be the concatenation of Panoptic segmentation (top), "
"instance segmentation (middle), and semantic segmentation (bottom).",
)
def predict(self, image):
im = cv2.imread(str(image))
outputs = self.predictor(im)
v = Visualizer(im[:, :, ::-1], self.coco_metadata, scale=1.2, instance_mode=ColorMode.IMAGE_BW)
panoptic_result = v.draw_panoptic_seg(outputs["panoptic_seg"][0].to("cpu"),
outputs["panoptic_seg"][1]).get_image()
v = Visualizer(im[:, :, ::-1], self.coco_metadata, scale=1.2, instance_mode=ColorMode.IMAGE_BW)
instance_result = v.draw_instance_predictions(outputs["instances"].to("cpu")).get_image()
v = Visualizer(im[:, :, ::-1], self.coco_metadata, scale=1.2, instance_mode=ColorMode.IMAGE_BW)
semantic_result = v.draw_sem_seg(outputs["sem_seg"].argmax(0).to("cpu")).get_image()
result = np.concatenate((panoptic_result, instance_result, semantic_result), axis=0)[:, :, ::-1]
out_path = Path(tempfile.mkdtemp()) / "out.png"
cv2.imwrite(str(out_path), result)
return out_path
| CutLER-main | videocutler/predict.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
try:
# ignore ShapelyDeprecationWarning from fvcore
from shapely.errors import ShapelyDeprecationWarning
import warnings
warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
pass
import copy
import itertools
import logging
import os
from collections import OrderedDict
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import (
# DefaultTrainer,
default_argument_parser,
default_setup,
launch,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
# MaskFormer
from mask2former import add_maskformer2_config
from mask2former_video import (
YTVISDatasetMapper,
YTVISEvaluator,
add_maskformer2_video_config,
build_detection_train_loader,
build_detection_test_loader,
get_detection_dataset_dicts,
)
# use the video-specific DefaultTrainer from mask2former_video.engine
from mask2former_video.engine import DefaultTrainer
# wandb logging is currently disabled; see the commented-out block in main()
class Trainer(DefaultTrainer):
"""
Extension of the Trainer class adapted to MaskFormer.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
os.makedirs(output_folder, exist_ok=True)
return YTVISEvaluator(dataset_name, cfg, True, output_folder)
@classmethod
def build_train_loader(cls, cfg):
dataset_name = cfg.DATASETS.TRAIN[0]
mapper = YTVISDatasetMapper(cfg, is_train=True)
dataset_dict = get_detection_dataset_dicts(
dataset_name,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
return build_detection_train_loader(cfg, mapper=mapper, dataset=dataset_dict)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
dataset_name = cfg.DATASETS.TEST[0]
mapper = YTVISDatasetMapper(cfg, is_train=False)
return build_detection_test_loader(cfg, dataset_name, mapper=mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_optimizer(cls, cfg, model):
weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM
weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED
base_lr_multiplier_names = cfg.SOLVER.BASE_LR_MULTIPLIER_NAMES
base_lr_multiplier = cfg.SOLVER.BASE_LR_MULTIPLIER
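# Note: SOLVER.BASE_LR_MULTIPLIER_NAMES is expected to list module names
# (for example a newly initialized head; the exact names depend on the
# model) whose learning rate is scaled by SOLVER.BASE_LR_MULTIPLIER in the
# per-parameter loop below.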
defaults = {}
defaults["lr"] = cfg.SOLVER.BASE_LR
defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module_name, module in model.named_modules():
for module_param_name, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if "backbone" in module_name:
hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER
if (
"relative_position_bias_table" in module_param_name
or "absolute_pos_embed" in module_param_name
):
print(module_param_name)
hyperparams["weight_decay"] = 0.0
if isinstance(module, norm_module_types):
hyperparams["weight_decay"] = weight_decay_norm
if isinstance(module, torch.nn.Embedding):
hyperparams["weight_decay"] = weight_decay_embed
if module_name in base_lr_multiplier_names:
hyperparams["lr"] *= base_lr_multiplier
print(" Checked: ", module_name, hyperparams["lr"])
params.append({"params": [value], **hyperparams})
def maybe_add_full_model_gradient_clipping(optim):
# detectron2 doesn't have full model gradient clipping now
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return FullModelGradientClippingOptimizer if enable else optim
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
)
elif optimizer_type == "ADAMW":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
params, cfg.SOLVER.BASE_LR
)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Evaluate the given model. The given model is expected to already contain
weights to evaluate.
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warning(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
with autocast():
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
# for poly lr schedule
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
add_maskformer2_video_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# NOTE: also need to change detectron2/detectron2/engine/defaults.py
# NOTE: need to change detectron2/detectron2/data/detection_utils.py
# NOTE: need to change detectron2/detectron2/data/transforms/transform.py
if args.test_dataset != "": cfg.DATASETS.TEST = ((args.test_dataset),)
if args.train_dataset != "": cfg.DATASETS.TRAIN = ((args.train_dataset),)
if args.steps != 0: cfg.SOLVER.STEPS = (int(args.steps),)
cfg.freeze()
default_setup(cfg, args)
# Setup logger for "mask_former" module
setup_logger(name="mask2former")
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="mask2former_video")
return cfg
def main(args):
cfg = setup(args)
# start a new wandb run to track this script
# if not args.eval_only or args.wandb_name != "":
# if comm.is_main_process(): # only on main process
# wandb.init(
# # set the wandb project where this run will be logged
# project="VideoCutLER",
# sync_tensorboard=True,
# name=args.wandb_name,
# entity="xdwang",
# )
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
raise NotImplementedError
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| CutLER-main | videocutler/train_net_video.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Modified by XuDong Wang from detectron2 and cocoapi
import argparse
import os
from mask2former_video.data_video.datasets.ytvis_api.ytvoseval import YTVOSeval
from mask2former_video.data_video.datasets.ytvis_api.ytvos import YTVOS
def print_and_summary(cocoEval):
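# Format the COCO-style summary metrics in cocoEval.stats as comma-separated
# percentages so they can be copy-pasted into a table or spreadsheet.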
str_print = ""
for key in cocoEval.stats:
str_print += "{:.2f},".format(key*100)
return str_print
def get_parser():
parser = argparse.ArgumentParser(description="eval configs")
parser.add_argument(
"--dataset-path", default="DATASETS", help="path to the annotation file",
)
parser.add_argument(
"--dataset-name", default="ytvis_2019", help="path to the annotation file",
)
parser.add_argument(
"--result-path", default="OUTPUT", help="path to the the result file",
)
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
annFile = os.path.join(args.dataset_path, args.dataset_name, 'train.json')
cocoGt=YTVOS(annFile)
resFile = os.path.join(args.result_path, 'inference/results.json')
cocoDt=cocoGt.loadRes(resFile)
annType = 'segm'
print('Running demo for {} results.'.format(annType))
cocoEval = YTVOSeval(cocoGt,cocoDt,annType)
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
copypaste = print_and_summary(cocoEval)
print(copypaste) | CutLER-main | videocutler/eval_ytvis.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
try:
# ignore ShapelyDeprecationWarning from fvcore
from shapely.errors import ShapelyDeprecationWarning
import warnings
warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
pass
import copy
import itertools
import logging
import os
from collections import OrderedDict
from typing import Any, Dict, List, Set
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
DefaultTrainer,
default_argument_parser,
default_setup,
launch,
)
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
# MaskFormer
from mask2former import (
COCOInstanceNewBaselineDatasetMapper,
COCOPanopticNewBaselineDatasetMapper,
InstanceSegEvaluator,
MaskFormerInstanceDatasetMapper,
MaskFormerPanopticDatasetMapper,
MaskFormerSemanticDatasetMapper,
SemanticSegmentorWithTTA,
add_maskformer2_config,
)
import random
# setup wandb
import wandb
class Trainer(DefaultTrainer):
"""
Extension of the Trainer class adapted to MaskFormer.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each
builtin dataset. For your own dataset, you can simply create an
evaluator manually in your script and do not have to worry about the
hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
# semantic segmentation
if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
# instance segmentation
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
# panoptic segmentation
if evaluator_type in [
"coco_panoptic_seg",
"ade20k_panoptic_seg",
"cityscapes_panoptic_seg",
"mapillary_vistas_panoptic_seg",
]:
if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
# COCO
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
# Mapillary Vistas
if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
# Cityscapes
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() > comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() > comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
if evaluator_type == "cityscapes_panoptic_seg":
if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
assert (
torch.cuda.device_count() > comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
assert (
torch.cuda.device_count() > comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
# ADE20K
if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
# LVIS
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
# Semantic segmentation dataset mapper
if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
mapper = MaskFormerSemanticDatasetMapper(cfg, True)
return build_detection_train_loader(cfg, mapper=mapper)
# Panoptic segmentation dataset mapper
elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
mapper = MaskFormerPanopticDatasetMapper(cfg, True)
return build_detection_train_loader(cfg, mapper=mapper)
# Instance segmentation dataset mapper
elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
return build_detection_train_loader(cfg, mapper=mapper)
# coco instance segmentation lsj new baseline
elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj":
mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True)
return build_detection_train_loader(cfg, mapper=mapper)
# coco panoptic segmentation lsj new baseline
elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj":
mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True)
return build_detection_train_loader(cfg, mapper=mapper)
else:
mapper = None
return build_detection_train_loader(cfg, mapper=mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_optimizer(cls, cfg, model):
weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM
weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED
defaults = {}
defaults["lr"] = cfg.SOLVER.BASE_LR
defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
# NaiveSyncBatchNorm inherits from BatchNorm2d
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for module_name, module in model.named_modules():
for module_param_name, value in module.named_parameters(recurse=False):
if not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
hyperparams = copy.copy(defaults)
if "backbone" in module_name:
hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER
if (
"relative_position_bias_table" in module_param_name
or "absolute_pos_embed" in module_param_name
):
print(module_param_name)
hyperparams["weight_decay"] = 0.0
if isinstance(module, norm_module_types):
hyperparams["weight_decay"] = weight_decay_norm
if isinstance(module, torch.nn.Embedding):
hyperparams["weight_decay"] = weight_decay_embed
params.append({"params": [value], **hyperparams})
def maybe_add_full_model_gradient_clipping(optim):
# detectron2 doesn't have full model gradient clipping now
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return FullModelGradientClippingOptimizer if enable else optim
optimizer_type = cfg.SOLVER.OPTIMIZER
if optimizer_type == "SGD":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
)
elif optimizer_type == "ADAMW":
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
params, cfg.SOLVER.BASE_LR
)
else:
raise NotImplementedError(f"no optimizer type {optimizer_type}")
if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
return optimizer
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("detectron2.trainer")
# In the end of training, run an evaluation with TTA.
logger.info("Running inference with test-time augmentation ...")
model = SemanticSegmentorWithTTA(cfg, model)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
# for poly lr schedule
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file(args.config_file)
# use shell script commands to define testing datasets
if 'DATASETS.TEST' in args.opts:
dataset_opt_idx = args.opts.index('DATASETS.TEST')
args.opts[dataset_opt_idx+1] = (args.opts[dataset_opt_idx+1],)
cfg.merge_from_list(args.opts)
if args.test_dataset != "": cfg.DATASETS.TEST = ((args.test_dataset),)
if args.train_dataset != "": cfg.DATASETS.TRAIN = ((args.train_dataset),)
cfg.freeze()
default_setup(cfg, args)
# Setup logger for "mask_former" module
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="mask2former")
return cfg
def main(args):
cfg = setup(args)
if not args.eval_only:
if comm.is_main_process(): # only on main process
wandb.init(
# set the wandb project where this run will be logged
project="CutLER-M2F",
sync_tensorboard=True,
name=args.wandb_name,
entity="xdwang",
)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
rint = random.randint(0, 10000)
args.dist_url = args.dist_url.replace('12399', str(12399 + rint))
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| CutLER-main | videocutler/train_net.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Copied from: https://github.com/facebookresearch/detectron2/blob/master/demo/predictor.py
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
Because rendering the visualization takes a considerable amount of time,
this helps improve throughput a little bit when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
| CutLER-main | videocutler/demo/predictor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detectron2/blob/master/demo/demo.py
import argparse
import glob
import multiprocessing as mp
import os
# fmt: off
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from mask2former import add_maskformer2_config
from predictor import VisualizationDemo
# constants
WINDOW_NAME = "mask2former demo"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="maskformer2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/coco/panoptic-segmentation/maskformer2_R50_bs16_50ep.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
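# Probe codec support: write a tiny throwaway clip with the requested
# fourcc/extension into a temporary directory and report whether OpenCV
# actually produced the file.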
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
if codec == ".mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
| CutLER-main | videocutler/demo/demo.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import os
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import torch
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.utils.file_io import PathManager
from pycocotools import mask as maskUtils
from panopticapi.evaluation import PQStat
def default_argument_parser():
"""
Creates a parser with some common arguments used by analysis tools.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(description="Evaluate PQ metric for semantic segmentation.")
# NOTE: currently does not support Cityscapes, you need to convert
# Cityscapes prediction format to Detectron2 prediction format.
parser.add_argument(
"--dataset-name",
default="ade20k_sem_seg_val",
choices=["ade20k_sem_seg_val", "coco_2017_test_stuff_10k_sem_seg", "ade20k_full_sem_seg_val"],
help="dataset name you want to evaluate")
parser.add_argument("--json-file", default="", help="path to detection json file")
return parser
# Modified from the official panoptic api: https://github.com/cocodataset/panopticapi/blob/master/panopticapi/evaluation.py
def pq_compute_single_image(segm_gt, segm_dt, categories, ignore_label):
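# Compute PQ statistics for one image by treating every category present in
# the mask as a single segment; a GT/prediction pair of the same category
# counts as a true positive when its IoU exceeds 0.5.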
pq_stat = PQStat()
VOID = ignore_label
OFFSET = 256 * 256 * 256
pan_gt = segm_gt
pan_pred = segm_dt
gt_ann = {'segments_info': []}
labels, labels_cnt = np.unique(segm_gt, return_counts=True)
for cat_id, cnt in zip(labels, labels_cnt):
if cat_id == VOID:
continue
gt_ann['segments_info'].append(
{"id": cat_id, "category_id": cat_id, "area": cnt, "iscrowd": 0}
)
pred_ann = {'segments_info': []}
for cat_id in np.unique(segm_dt):
pred_ann['segments_info'].append({"id": cat_id, "category_id": cat_id})
gt_segms = {el['id']: el for el in gt_ann['segments_info']}
pred_segms = {el['id']: el for el in pred_ann['segments_info']}
# predicted segments area calculation + prediction sanity checks
pred_labels_set = set(el['id'] for el in pred_ann['segments_info'])
labels, labels_cnt = np.unique(pan_pred, return_counts=True)
for label, label_cnt in zip(labels, labels_cnt):
if label not in pred_segms:
if label == VOID:
continue
raise KeyError('Segment with ID {} is present in the predicted segmentation but missing from its segments_info.'.format(label))
pred_segms[label]['area'] = label_cnt
pred_labels_set.remove(label)
if pred_segms[label]['category_id'] not in categories:
raise KeyError('Segment with ID {} has unknown category_id {}.'.format(label, pred_segms[label]['category_id']))
if len(pred_labels_set) != 0:
raise KeyError('The following segment IDs {} are listed in segments_info but missing from the predicted segmentation.'.format(list(pred_labels_set)))
# confusion matrix calculation
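# Each (gt_id, pred_id) pixel pair is packed into a single integer
# gt_id * OFFSET + pred_id (OFFSET exceeds any valid id), so one np.unique
# call over the packed array yields the intersection area of every
# ground-truth/prediction segment pair at once.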
pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype(np.uint64)
gt_pred_map = {}
labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)
for label, intersection in zip(labels, labels_cnt):
gt_id = label // OFFSET
pred_id = label % OFFSET
gt_pred_map[(gt_id, pred_id)] = intersection
# count all matched pairs
gt_matched = set()
pred_matched = set()
for label_tuple, intersection in gt_pred_map.items():
gt_label, pred_label = label_tuple
if gt_label not in gt_segms:
continue
if pred_label not in pred_segms:
continue
if gt_segms[gt_label]['iscrowd'] == 1:
continue
if gt_segms[gt_label]['category_id'] != pred_segms[pred_label]['category_id']:
continue
union = pred_segms[pred_label]['area'] + gt_segms[gt_label]['area'] - intersection - gt_pred_map.get((VOID, pred_label), 0)
iou = intersection / union
if iou > 0.5:
pq_stat[gt_segms[gt_label]['category_id']].tp += 1
pq_stat[gt_segms[gt_label]['category_id']].iou += iou
gt_matched.add(gt_label)
pred_matched.add(pred_label)
# count false negatives
crowd_labels_dict = {}
for gt_label, gt_info in gt_segms.items():
if gt_label in gt_matched:
continue
# crowd segments are ignored
if gt_info['iscrowd'] == 1:
crowd_labels_dict[gt_info['category_id']] = gt_label
continue
pq_stat[gt_info['category_id']].fn += 1
# count false positives
for pred_label, pred_info in pred_segms.items():
if pred_label in pred_matched:
continue
# intersection of the segment with VOID
intersection = gt_pred_map.get((VOID, pred_label), 0)
# plus intersection with corresponding CROWD region if it exists
if pred_info['category_id'] in crowd_labels_dict:
intersection += gt_pred_map.get((crowd_labels_dict[pred_info['category_id']], pred_label), 0)
# predicted segment is ignored if more than half of the segment correspond to VOID and CROWD regions
if intersection / pred_info['area'] > 0.5:
continue
pq_stat[pred_info['category_id']].fp += 1
return pq_stat
def main():
parser = default_argument_parser()
args = parser.parse_args()
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
json_file = args.json_file
with open(json_file) as f:
predictions = json.load(f)
imgToAnns = defaultdict(list)
for pred in predictions:
image_id = os.path.basename(pred["file_name"]).split(".")[0]
imgToAnns[image_id].append(
{"category_id" : pred["category_id"], "segmentation" : pred["segmentation"]}
)
image_ids = list(imgToAnns.keys())
meta = MetadataCatalog.get(args.dataset_name)
class_names = meta.stuff_classes
num_classes = len(meta.stuff_classes)
ignore_label = meta.ignore_label
conf_matrix = np.zeros((num_classes + 1, num_classes + 1), dtype=np.int64)
categories = {}
for i in range(num_classes):
categories[i] = {"id": i, "name": class_names[i], "isthing": 0}
pq_stat = PQStat()
for image_id in tqdm(image_ids):
if args.dataset_name == "ade20k_sem_seg_val":
gt_dir = os.path.join(_root, "ADEChallengeData2016", "annotations_detectron2", "validation")
segm_gt = read_image(os.path.join(gt_dir, image_id + ".png")).copy().astype(np.int64)
elif args.dataset_name == "coco_2017_test_stuff_10k_sem_seg":
gt_dir = os.path.join(_root, "coco", "coco_stuff_10k", "annotations_detectron2", "test")
segm_gt = read_image(os.path.join(gt_dir, image_id + ".png")).copy().astype(np.int64)
elif args.dataset_name == "ade20k_full_sem_seg_val":
gt_dir = os.path.join(_root, "ADE20K_2021_17_01", "annotations_detectron2", "validation")
segm_gt = read_image(os.path.join(gt_dir, image_id + ".tif")).copy().astype(np.int64)
else:
raise ValueError(f"Unsupported dataset {args.dataset_name}")
# get predictions
segm_dt = np.zeros_like(segm_gt)
anns = imgToAnns[image_id]
for ann in anns:
# map back category_id
if hasattr(meta, "stuff_dataset_id_to_contiguous_id"):
if ann["category_id"] in meta.stuff_dataset_id_to_contiguous_id:
category_id = meta.stuff_dataset_id_to_contiguous_id[ann["category_id"]]
else:
category_id = ann["category_id"]
mask = maskUtils.decode(ann["segmentation"])
segm_dt[mask > 0] = category_id
# miou
gt = segm_gt.copy()
pred = segm_dt.copy()
gt[gt == ignore_label] = num_classes
conf_matrix += np.bincount(
(num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
minlength=conf_matrix.size,
).reshape(conf_matrix.shape)
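        # The flat index (num_classes + 1) * pred + gt encodes each (pred, gt) pixel pair
        # as a single integer, so np.bincount accumulates the full confusion matrix in one
        # vectorised pass (rows = predicted class, columns = ground-truth class).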
# pq
pq_stat_single = pq_compute_single_image(segm_gt, segm_dt, categories, meta.ignore_label)
pq_stat += pq_stat_single
metrics = [("All", None), ("Stuff", False)]
results = {}
for name, isthing in metrics:
results[name], per_class_results = pq_stat.pq_average(categories, isthing=isthing)
if name == 'All':
results['per_class'] = per_class_results
print("{:10s}| {:>5s} {:>5s} {:>5s} {:>5s}".format("", "PQ", "SQ", "RQ", "N"))
print("-" * (10 + 7 * 4))
for name, _isthing in metrics:
print("{:10s}| {:5.1f} {:5.1f} {:5.1f} {:5d}".format(
name,
100 * results[name]['pq'],
100 * results[name]['sq'],
100 * results[name]['rq'],
results[name]['n'])
)
# calculate miou
acc = np.full(num_classes, np.nan, dtype=np.float64)
iou = np.full(num_classes, np.nan, dtype=np.float64)
tp = conf_matrix.diagonal()[:-1].astype(np.float64)
pos_gt = np.sum(conf_matrix[:-1, :-1], axis=0).astype(np.float64)
pos_pred = np.sum(conf_matrix[:-1, :-1], axis=1).astype(np.float64)
acc_valid = pos_gt > 0
acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
iou_valid = (pos_gt + pos_pred) > 0
union = pos_gt + pos_pred - tp
iou[acc_valid] = tp[acc_valid] / union[acc_valid]
miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
print("")
print(f"mIoU: {miou}")
if __name__ == '__main__':
main()
| CutLER-main | videocutler/tools/evaluate_pq_for_semantic_segmentation.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle as pkl
import sys
import torch
"""
Usage:
# download one of the ResNet{18,34,50,101,152} models from torchvision:
wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
# run the conversion
./convert-torchvision-to-d2.py r50.pth r50.pkl
# Then, use r50.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/r50.pkl"
PIXEL_MEAN: [123.675, 116.280, 103.530]
PIXEL_STD: [58.395, 57.120, 57.375]
RESNETS:
DEPTH: 50
STRIDE_IN_1X1: False
INPUT:
FORMAT: "RGB"
"""
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")
newmodel = {}
for k in list(obj.keys()):
old_k = k
if "layer" not in k:
k = "stem." + k
for t in [1, 2, 3, 4]:
k = k.replace("layer{}".format(t), "res{}".format(t + 1))
for t in [1, 2, 3]:
k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
k = k.replace("downsample.0", "shortcut")
k = k.replace("downsample.1", "shortcut.norm")
print(old_k, "->", k)
newmodel[k] = obj.pop(old_k).detach().numpy()
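        # e.g. "layer1.0.bn1.weight" -> "res2.0.conv1.norm.weight",
        #      "conv1.weight"        -> "stem.conv1.weight"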
res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
if obj:
print("Unconverted keys:", obj.keys())
| CutLER-main | videocutler/tools/convert-torchvision-to-d2.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified by Bowen Cheng from: https://github.com/bowenc0221/boundary-iou-api/blob/master/tools/coco_instance_evaluation.py
"""
Evaluation for COCO val2017:
python ./tools/coco_instance_evaluation.py \
--gt-json-file COCO_GT_JSON \
--dt-json-file COCO_DT_JSON
"""
import argparse
import json
from boundary_iou.coco_instance_api.coco import COCO
from boundary_iou.coco_instance_api.cocoeval import COCOeval
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--gt-json-file", default="")
parser.add_argument("--dt-json-file", default="")
parser.add_argument("--iou-type", default="boundary")
parser.add_argument("--dilation-ratio", default="0.020", type=float)
args = parser.parse_args()
print(args)
annFile = args.gt_json_file
resFile = args.dt_json_file
dilation_ratio = args.dilation_ratio
if args.iou_type == "boundary":
get_boundary = True
else:
get_boundary = False
cocoGt = COCO(annFile, get_boundary=get_boundary, dilation_ratio=dilation_ratio)
# remove box predictions
resFile = json.load(open(resFile))
for c in resFile:
c.pop("bbox", None)
cocoDt = cocoGt.loadRes(resFile)
cocoEval = COCOeval(cocoGt, cocoDt, iouType=args.iou_type, dilation_ratio=dilation_ratio)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if __name__ == '__main__':
main()
| CutLER-main | videocutler/tools/evaluate_coco_boundary_ap.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detectron2/blob/main/tools/analyze_model.py
import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table # can also try flop_count_str
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
# fmt: off
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
from mask2former import add_maskformer2_config
logger = logging.getLogger("detectron2")
def setup(args):
if args.config_file.endswith(".yaml"):
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.merge_from_list(args.opts)
cfg.freeze()
else:
cfg = LazyConfig.load(args.config_file)
cfg = LazyConfig.apply_overrides(cfg, args.opts)
setup_logger(name="fvcore")
setup_logger()
return cfg
def do_flop(cfg):
if isinstance(cfg, CfgNode):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
else:
data_loader = instantiate(cfg.dataloader.test)
model = instantiate(cfg.model)
model.to(cfg.train.device)
DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
model.eval()
counts = Counter()
total_flops = []
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
if args.use_fixed_input_size and isinstance(cfg, CfgNode):
import torch
crop_size = cfg.INPUT.CROP.SIZE[0]
data[0]["image"] = torch.zeros((3, crop_size, crop_size))
flops = FlopCountAnalysis(model, data)
if idx > 0:
flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
counts += flops.by_operator()
total_flops.append(flops.total())
logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops))
logger.info(
"Average GFlops for each type of operators:\n"
+ str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()])
)
logger.info(
"Total GFlops: {:.1f}±{:.1f}".format(np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9)
)
def do_activation(cfg):
if isinstance(cfg, CfgNode):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
else:
data_loader = instantiate(cfg.dataloader.test)
model = instantiate(cfg.model)
model.to(cfg.train.device)
DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
model.eval()
counts = Counter()
total_activations = []
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
count = activation_count_operators(model, data)
counts += count
total_activations.append(sum(count.values()))
logger.info(
"(Million) Activations for Each Type of Operators:\n"
        + str([(k, v / (idx + 1)) for k, v in counts.items()])
)
logger.info(
"Total (Million) Activations: {}±{}".format(
np.mean(total_activations), np.std(total_activations)
)
)
def do_parameter(cfg):
if isinstance(cfg, CfgNode):
model = build_model(cfg)
else:
model = instantiate(cfg.model)
logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def do_structure(cfg):
if isinstance(cfg, CfgNode):
model = build_model(cfg)
else:
model = instantiate(cfg.model)
logger.info("Model Structure:\n" + str(model))
if __name__ == "__main__":
parser = default_argument_parser(
epilog="""
Examples:
To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:
$ ./analyze_model.py --num-inputs 100 --tasks flop \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
MODEL.WEIGHTS /path/to/model.pkl
"""
)
parser.add_argument(
"--tasks",
choices=["flop", "activation", "parameter", "structure"],
required=True,
nargs="+",
)
parser.add_argument(
"-n",
"--num-inputs",
default=100,
type=int,
help="number of inputs used to compute statistics for flops/activations, "
"both are data dependent.",
)
parser.add_argument(
"--use-fixed-input-size",
action="store_true",
help="use fixed input size when calculating flops",
)
args = parser.parse_args()
assert not args.eval_only
assert args.num_gpus == 1
cfg = setup(args)
for task in args.tasks:
{
"flop": do_flop,
"activation": do_activation,
"parameter": do_parameter,
"structure": do_structure,
}[task](cfg)
| CutLER-main | videocutler/tools/analyze_model.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle as pkl
import sys
import torch
"""
Usage:
# download pretrained swin model:
wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
# run the conversion
./convert-pretrained-model-to-d2.py swin_tiny_patch4_window7_224.pth swin_tiny_patch4_window7_224.pkl
# Then, use swin_tiny_patch4_window7_224.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/swin_tiny_patch4_window7_224.pkl"
INPUT:
FORMAT: "RGB"
"""
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")["model"]
res = {"model": obj, "__author__": "third_party", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
| CutLER-main | videocutler/tools/convert-pretrained-swin-model-to-d2.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
from itertools import count
import numpy as np
import torch
from fvcore.transforms import HFlipTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from detectron2.data.detection_utils import read_image
from detectron2.modeling import DatasetMapperTTA
__all__ = [
"SemanticSegmentorWithTTA",
]
class SemanticSegmentorWithTTA(nn.Module):
"""
A SemanticSegmentor with test-time augmentation enabled.
Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.
"""
def __init__(self, cfg, model, tta_mapper=None, batch_size=1):
"""
Args:
cfg (CfgNode):
model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.
tta_mapper (callable): takes a dataset dict and returns a list of
augmented versions of the dataset dict. Defaults to
`DatasetMapperTTA(cfg)`.
batch_size (int): batch the augmented images into this batch size for inference.
"""
super().__init__()
if isinstance(model, DistributedDataParallel):
model = model.module
self.cfg = cfg.clone()
self.model = model
if tta_mapper is None:
tta_mapper = DatasetMapperTTA(cfg)
self.tta_mapper = tta_mapper
self.batch_size = batch_size
def __call__(self, batched_inputs):
"""
Same input/output format as :meth:`SemanticSegmentor.forward`
"""
def _maybe_read_image(dataset_dict):
ret = copy.copy(dataset_dict)
if "image" not in ret:
image = read_image(ret.pop("file_name"), self.model.input_format)
image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW
ret["image"] = image
if "height" not in ret and "width" not in ret:
ret["height"] = image.shape[1]
ret["width"] = image.shape[2]
return ret
processed_results = []
for x in batched_inputs:
result = self._inference_one_image(_maybe_read_image(x))
processed_results.append(result)
return processed_results
def _inference_one_image(self, input):
"""
Args:
input (dict): one dataset dict with "image" field being a CHW tensor
Returns:
dict: one output dict
"""
orig_shape = (input["height"], input["width"])
augmented_inputs, tfms = self._get_augmented_inputs(input)
final_predictions = None
count_predictions = 0
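        # Accumulate per-pixel class scores over all augmented copies; predictions made on
        # horizontally flipped inputs are flipped back along the width axis (dims=[2] of the
        # C x H x W "sem_seg" tensor) before being added, then averaged below.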
for input, tfm in zip(augmented_inputs, tfms):
count_predictions += 1
with torch.no_grad():
if final_predictions is None:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions = self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions = self.model([input])[0].pop("sem_seg")
else:
if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
final_predictions += self.model([input])[0].pop("sem_seg").flip(dims=[2])
else:
final_predictions += self.model([input])[0].pop("sem_seg")
final_predictions = final_predictions / count_predictions
return {"sem_seg": final_predictions}
def _get_augmented_inputs(self, input):
augmented_inputs = self.tta_mapper(input)
tfms = [x.pop("transforms") for x in augmented_inputs]
return augmented_inputs, tfms
| CutLER-main | videocutler/mask2former/test_time_augmentation.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_maskformer2_config(cfg):
"""
Add config for MASK_FORMER.
"""
# NOTE: configs from original maskformer
# data config
# select the dataset mapper
cfg.INPUT.DATASET_MAPPER_NAME = "mask_former_semantic"
# Color augmentation
cfg.INPUT.COLOR_AUG_SSD = False
# We retry random cropping until no single category in semantic segmentation GT occupies more
# than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
# Pad image and segmentation GT in dataset mapper.
cfg.INPUT.SIZE_DIVISIBILITY = -1
# solver config
# weight decay on embedding
cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0
# optimizer
cfg.SOLVER.OPTIMIZER = "ADAMW"
cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
# mask_former model config
cfg.MODEL.MASK_FORMER = CN()
# loss
cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True
cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1
cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0
cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0
cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0
cfg.MODEL.MASK_FORMER.POSITIVE_BANK_IOU_THRESH = 0.01
# transformer config
cfg.MODEL.MASK_FORMER.NHEADS = 8
cfg.MODEL.MASK_FORMER.DROPOUT = 0.1
cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048
cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0
cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6
cfg.MODEL.MASK_FORMER.PRE_NORM = False
cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256
cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100
cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = "res5"
cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False
# mask_former inference config
cfg.MODEL.MASK_FORMER.TEST = CN()
cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True
cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False
cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False
cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0
cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0
cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False
# Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. ResNet)
# you can use this config to override
cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32
# pixel decoder config
cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256
# adding transformer in pixel decoder
cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0
# pixel decoder
cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = "BasePixelDecoder"
# swin transformer backbone
cfg.MODEL.SWIN = CN()
cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224
cfg.MODEL.SWIN.PATCH_SIZE = 4
cfg.MODEL.SWIN.EMBED_DIM = 96
cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
cfg.MODEL.SWIN.WINDOW_SIZE = 7
cfg.MODEL.SWIN.MLP_RATIO = 4.0
cfg.MODEL.SWIN.QKV_BIAS = True
cfg.MODEL.SWIN.QK_SCALE = None
cfg.MODEL.SWIN.DROP_RATE = 0.0
cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0
cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3
cfg.MODEL.SWIN.APE = False
cfg.MODEL.SWIN.PATCH_NORM = True
cfg.MODEL.SWIN.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
cfg.MODEL.SWIN.USE_CHECKPOINT = False
# NOTE: maskformer2 extra configs
# transformer module
cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = "MultiScaleMaskedTransformerDecoder"
# LSJ aug
cfg.INPUT.IMAGE_SIZE = 1024
cfg.INPUT.MIN_SCALE = 0.1
cfg.INPUT.MAX_SCALE = 2.0
# MSDeformAttn encoder configs
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = ["res3", "res4", "res5"]
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8
# point loss configs
# Number of points sampled during training for a mask point head.
cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112
# Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
# original paper.
cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0
    # Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
# the original paper.
cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75
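    # Typical usage (see setup() in tools/analyze_model.py):
    #   cfg = get_cfg(); add_deeplab_config(cfg); add_maskformer2_config(cfg)
    #   cfg.merge_from_file(config_file); cfg.merge_from_list(opts); cfg.freeze()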
| CutLER-main | videocutler/mask2former/config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from .modeling.criterion import SetCriterion
from .modeling.matcher import HungarianMatcher
@META_ARCH_REGISTRY.register()
class MaskFormer(nn.Module):
"""
Main class for mask classification semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
metadata,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# inference
semantic_on: bool,
panoptic_on: bool,
instance_on: bool,
test_topk_per_image: int,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
sem_seg_head: a module that predicts semantic segmentation from backbone features
criterion: a module that defines the loss
num_queries: int, number of queries
object_mask_threshold: float, threshold to filter query based on classification score
for panoptic segmentation inference
overlap_threshold: overlap threshold used in general inference for panoptic segmentation
metadata: dataset meta, get `thing` and `stuff` category names for panoptic
segmentation inference
size_divisibility: Some backbones require the input height and width to be divisible by a
                specific integer. We can use this to override such a requirement.
sem_seg_postprocess_before_inference: whether to resize the prediction back
to original input size before semantic segmentation inference or after.
                For high-resolution datasets like Mapillary, resizing predictions before
                inference can cause OOM errors.
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
semantic_on: bool, whether to output semantic segmentation prediction
instance_on: bool, whether to output instance segmentation prediction
panoptic_on: bool, whether to output panoptic segmentation prediction
test_topk_per_image: int, instance segmentation parameter, keep topk instances per image
"""
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.criterion = criterion
self.num_queries = num_queries
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.metadata = metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
# additional args
self.semantic_on = semantic_on
self.instance_on = instance_on
self.panoptic_on = panoptic_on
self.test_topk_per_image = test_topk_per_image
if not self.semantic_on:
assert self.sem_seg_postprocess_before_inference
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
# Loss parameters:
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
# loss weights
class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
# building criterion
matcher = HungarianMatcher(
cost_class=class_weight,
cost_mask=mask_weight,
cost_dice=dice_weight,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
thresh=cfg.MODEL.MASK_FORMER.POSITIVE_BANK_IOU_THRESH,
)
weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight}
if deep_supervision:
dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "masks"]
criterion = SetCriterion(
sem_seg_head.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
)
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
"object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
"overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
"metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
"size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
"sem_seg_postprocess_before_inference": (
cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE
or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON
or cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON
),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
# inference
"semantic_on": cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON,
"instance_on": cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON,
"panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON,
"test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
                    per-pixel segmentation predicted by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
                    A tuple that represents the panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features)
if self.training:
# mask classification target
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
targets = self.prepare_targets(gt_instances, images)
else:
targets = None
# bipartite matching-based loss
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
return losses
else:
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
# upsample masks
mask_pred_results = F.interpolate(
mask_pred_results,
size=(images.tensor.shape[-2], images.tensor.shape[-1]),
mode="bilinear",
align_corners=False,
)
del outputs
processed_results = []
for mask_cls_result, mask_pred_result, input_per_image, image_size in zip(
mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
processed_results.append({})
if self.sem_seg_postprocess_before_inference:
mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
mask_pred_result, image_size, height, width
)
mask_cls_result = mask_cls_result.to(mask_pred_result)
# semantic segmentation inference
if self.semantic_on:
r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result)
if not self.sem_seg_postprocess_before_inference:
r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width)
processed_results[-1]["sem_seg"] = r
# panoptic segmentation inference
if self.panoptic_on:
panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result)
processed_results[-1]["panoptic_seg"] = panoptic_r
# instance segmentation inference
if self.instance_on:
instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result)
processed_results[-1]["instances"] = instance_r
return processed_results
def prepare_targets(self, targets, images):
h_pad, w_pad = images.tensor.shape[-2:]
new_targets = []
for targets_per_image in targets:
# pad gt
gt_masks = targets_per_image.gt_masks
padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device)
padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks
new_targets.append(
{
"labels": targets_per_image.gt_classes,
"masks": padded_masks,
}
)
return new_targets
def semantic_inference(self, mask_cls, mask_pred):
mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
mask_pred = mask_pred.sigmoid()
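        # Combine [Q, C] class probabilities with [Q, H, W] mask probabilities into a
        # [C, H, W] per-class segmentation map.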
semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
return semseg
def panoptic_inference(self, mask_cls, mask_pred):
scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
mask_pred = mask_pred.sigmoid()
keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold)
cur_scores = scores[keep]
cur_classes = labels[keep]
cur_masks = mask_pred[keep]
cur_mask_cls = mask_cls[keep]
cur_mask_cls = cur_mask_cls[:, :-1]
cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
h, w = cur_masks.shape[-2:]
panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
segments_info = []
current_segment_id = 0
if cur_masks.shape[0] == 0:
# We didn't detect any mask :(
return panoptic_seg, segments_info
else:
# take argmax
cur_mask_ids = cur_prob_masks.argmax(0)
stuff_memory_list = {}
for k in range(cur_classes.shape[0]):
pred_class = cur_classes[k].item()
isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values()
mask_area = (cur_mask_ids == k).sum().item()
original_area = (cur_masks[k] >= 0.5).sum().item()
mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5)
if mask_area > 0 and original_area > 0 and mask.sum().item() > 0:
if mask_area / original_area < self.overlap_threshold:
continue
# merge stuff regions
if not isthing:
if int(pred_class) in stuff_memory_list.keys():
panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
continue
else:
stuff_memory_list[int(pred_class)] = current_segment_id + 1
current_segment_id += 1
panoptic_seg[mask] = current_segment_id
segments_info.append(
{
"id": current_segment_id,
"isthing": bool(isthing),
"category_id": int(pred_class),
}
)
return panoptic_seg, segments_info
def instance_inference(self, mask_cls, mask_pred):
# mask_pred is already processed to have the same shape as original input
image_size = mask_pred.shape[-2:]
# [Q, K]
scores = F.softmax(mask_cls, dim=-1)[:, :-1]
labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1)
# scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = topk_indices // self.sem_seg_head.num_classes
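        # topk was taken over the flattened Q x K score matrix, so the integer division
        # above recovers the query index of each selected (query, class) pair.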
# mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1)
mask_pred = mask_pred[topk_indices]
# if this is panoptic segmentation, we only keep the "thing" classes
if self.panoptic_on:
keep = torch.zeros_like(scores_per_image).bool()
for i, lab in enumerate(labels_per_image):
keep[i] = lab in self.metadata.thing_dataset_id_to_contiguous_id.values()
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
mask_pred = mask_pred[keep]
result = Instances(image_size)
# mask (before sigmoid)
result.pred_masks = (mask_pred > 0).float()
result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4))
# Uncomment the following to get boxes from masks (this is slow)
# result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes()
# calculate average mask prob
mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6)
result.scores = scores_per_image * mask_scores_per_image
result.pred_classes = labels_per_image
return result
| CutLER-main | videocutler/mask2former/maskformer_model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from . import data # register all new datasets
from . import modeling
# config
from .config import add_maskformer2_config
# dataset loading
from .data.dataset_mappers.coco_instance_new_baseline_dataset_mapper import COCOInstanceNewBaselineDatasetMapper
from .data.dataset_mappers.coco_panoptic_new_baseline_dataset_mapper import COCOPanopticNewBaselineDatasetMapper
from .data.dataset_mappers.mask_former_instance_dataset_mapper import (
MaskFormerInstanceDatasetMapper,
)
from .data.dataset_mappers.mask_former_panoptic_dataset_mapper import (
MaskFormerPanopticDatasetMapper,
)
from .data.dataset_mappers.mask_former_semantic_dataset_mapper import (
MaskFormerSemanticDatasetMapper,
)
# models
from .maskformer_model import MaskFormer
from .test_time_augmentation import SemanticSegmentorWithTTA
# evaluation
from .evaluation.instance_evaluation import InstanceSegEvaluator
| CutLER-main | videocutler/mask2former/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
from typing import List, Optional
import torch
import torch.distributed as dist
import torchvision
from torch import Tensor
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
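            # Each image is copied into the top-left corner of the (max_h, max_w) canvas;
            # the mask stays True on padded pixels and becomes False on valid pixels.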
else:
raise ValueError("not supported")
return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(
torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
| CutLER-main | videocutler/mask2former/utils/misc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| CutLER-main | videocutler/mask2former/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/matcher.py
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
from torch.cuda.amp import autocast
from detectron2.projects.point_rend.point_features import point_sample
def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor, thresh: float):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets)
denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :]
loss = 1 - (numerator + 1) / (denominator + 1)
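    # Drop-weight masking: a prediction keeps its pairwise dice cost only if its binarised
    # foreground overlaps at least one target in >= `thresh` of the sampled points;
    # otherwise its entire row of pairwise costs is zeroed.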
if targets.size()[0] != 0:
hw = inputs.shape[1]
drop_weights = torch.einsum("nc,mc->nm", inputs.ge(0.0).float(), targets).max(-1)[0] / hw
drop_weights = drop_weights.ge(thresh).float().detach()[:,None]
loss = drop_weights * loss
return loss
batch_dice_loss_jit = torch.jit.script(
batch_dice_loss
) # type: torch.jit.ScriptModule
def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor, thresh: float,):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
hw = inputs.shape[1]
pos = F.binary_cross_entropy_with_logits(
inputs, torch.ones_like(inputs), reduction="none"
)
# print("pos: ", pos.size())
neg = F.binary_cross_entropy_with_logits(
inputs, torch.zeros_like(inputs), reduction="none"
)
# print("neg: ", neg.size())
loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum(
"nc,mc->nm", neg, (1 - targets)
)
if targets.size()[0] != 0:
drop_weights = torch.einsum("nc,mc->nm", inputs.ge(0.0).float(), targets).max(-1)[0] / hw
drop_weights = drop_weights.ge(thresh).float().detach()[:,None]
loss = drop_weights * loss
# print("drop_weights: ", drop_weights.size(), drop_weights.sum())
return loss / hw
batch_sigmoid_ce_loss_jit = torch.jit.script(
batch_sigmoid_ce_loss
) # type: torch.jit.ScriptModule
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
# used in mask2former/maskformer_model.py
def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0, thresh: float = 0.01):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost
cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, "all costs cant be 0"
self.num_points = num_points
self.thresh = thresh
@torch.no_grad()
def memory_efficient_forward(self, outputs, targets):
"""More memory-friendly matching"""
bs, num_queries = outputs["pred_logits"].shape[:2]
indices = []
# Iterate through batch size
for b in range(bs):
out_prob = outputs["pred_logits"][b].softmax(-1) # [num_queries, num_classes]
tgt_ids = targets[b]["labels"]
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
            # The 1 is a constant that doesn't change the matching, it can be omitted.
cost_class = -out_prob[:, tgt_ids]
out_mask = outputs["pred_masks"][b] # [num_queries, H_pred, W_pred]
# gt masks are already padded when preparing target
tgt_mask = targets[b]["masks"].to(out_mask)
out_mask = out_mask[:, None]
tgt_mask = tgt_mask[:, None]
# all masks share the same set of points for efficient matching!
point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)
# get gt labels
tgt_mask = point_sample(
tgt_mask,
point_coords.repeat(tgt_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
out_mask = point_sample(
out_mask,
point_coords.repeat(out_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
with autocast(enabled=False):
out_mask = out_mask.float()
tgt_mask = tgt_mask.float()
if out_mask.shape[0] == 0 or tgt_mask.shape[0] == 0:
cost_mask = batch_sigmoid_ce_loss(out_mask, tgt_mask, thresh=self.thresh)
cost_dice = batch_dice_loss(out_mask, tgt_mask, thresh=self.thresh)
else:
# Compute the focal loss between masks
cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask, thresh=self.thresh)
                    # Compute the dice loss between masks
cost_dice = batch_dice_loss_jit(out_mask, tgt_mask, thresh=self.thresh)
# Final cost matrix
C = (
self.cost_mask * cost_mask
+ self.cost_class * cost_class
+ self.cost_dice * cost_dice
)
C = C.reshape(num_queries, -1).cpu()
indices.append(linear_sum_assignment(C))
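            # linear_sum_assignment returns (row_ind, col_ind): row_ind indexes the
            # predicted queries and col_ind the targets of this image, chosen to
            # minimise the total matching cost.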
return [
(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))
for i, j in indices
]
@torch.no_grad()
def forward(self, outputs, targets):
"""Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
return self.memory_efficient_forward(outputs, targets)
def __repr__(self, _repr_indent=4):
head = "Matcher " + self.__class__.__name__
body = [
"cost_class: {}".format(self.cost_class),
"cost_mask: {}".format(self.cost_mask),
"cost_dice: {}".format(self.cost_dice),
]
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| CutLER-main | videocutler/mask2former/modeling/matcher.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .backbone.swin import D2SwinTransformer
from .pixel_decoder.fpn import BasePixelDecoder
from .pixel_decoder.msdeformattn import MSDeformAttnPixelDecoder
from .meta_arch.mask_former_head import MaskFormerHead
from .meta_arch.per_pixel_baseline import PerPixelBaselineHead, PerPixelBaselinePlusHead
| CutLER-main | videocutler/mask2former/modeling/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
MaskFormer criterion.
"""
import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(-1)
denominator = inputs.sum(-1) + targets.sum(-1)
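    # Soft dice: 1 - (2 * |P ∩ G| + 1) / (|P| + |G| + 1); the +1 smoothing keeps the
    # ratio well defined when both masks are empty.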
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_masks
dice_loss_jit = torch.jit.script(
dice_loss
) # type: torch.jit.ScriptModule
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
return loss.mean(1).sum() / num_masks
sigmoid_ce_loss_jit = torch.jit.script(
sigmoid_ce_loss
) # type: torch.jit.ScriptModule
def calculate_uncertainty(logits):
"""
    We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
assert logits.shape[1] == 1
gt_class_logits = logits.clone()
return -(torch.abs(gt_class_logits))
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,
num_points, oversample_ratio, importance_sample_ratio):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
def loss_labels(self, outputs, targets, indices, num_masks):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"].float()
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_masks(self, outputs, targets, indices, num_masks):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# No need to upsample predictions as we are using normalized coordinates :)
# N x 1 x H x W
src_masks = src_masks[:, None]
target_masks = target_masks[:, None]
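        # Rather than evaluating the mask losses densely, sample `num_points` locations per
        # mask (biased towards uncertain, near-boundary predictions, PointRend-style) and
        # compute the BCE and dice losses only at those sampled points.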
with torch.no_grad():
# sample point_coords
point_coords = get_uncertain_point_coords_with_randomness(
src_masks,
lambda logits: calculate_uncertainty(logits),
self.num_points,
self.oversample_ratio,
self.importance_sample_ratio,
)
# get gt labels
point_labels = point_sample(
target_masks,
point_coords,
align_corners=False,
).squeeze(1)
point_logits = point_sample(
src_masks,
point_coords,
align_corners=False,
).squeeze(1)
losses = {
"loss_mask": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),
"loss_dice": dice_loss_jit(point_logits, point_labels, num_masks),
}
del src_masks
del target_masks
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_masks):
loss_map = {
'labels': self.loss_labels,
'masks': self.loss_masks,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_masks)
def forward(self, outputs, targets):
"""This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_masks = sum(len(t["labels"]) for t in targets)
num_masks = torch.as_tensor(
[num_masks], dtype=torch.float, device=next(iter(outputs.values())).device
)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_masks)
num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
def __repr__(self):
head = "Criterion " + self.__class__.__name__
body = [
"matcher: {}".format(self.matcher.__repr__(_repr_indent=8)),
"losses: {}".format(self.losses),
"weight_dict: {}".format(self.weight_dict),
"num_classes: {}".format(self.num_classes),
"eos_coef: {}".format(self.eos_coef),
"num_points: {}".format(self.num_points),
"oversample_ratio: {}".format(self.oversample_ratio),
"importance_sample_ratio: {}".format(self.importance_sample_ratio),
]
_repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| CutLER-main | videocutler/mask2former/modeling/criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.utils.registry import Registry
from .position_encoding import PositionEmbeddingSine
from .transformer import Transformer
TRANSFORMER_DECODER_REGISTRY = Registry("TRANSFORMER_MODULE")
TRANSFORMER_DECODER_REGISTRY.__doc__ = """
Registry for transformer module in MaskFormer.
"""
def build_transformer_decoder(cfg, in_channels, mask_classification=True):
"""
    Build an instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
"""
name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME
return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)
@TRANSFORMER_DECODER_REGISTRY.register()
class StandardTransformerDecoder(nn.Module):
@configurable
def __init__(
self,
in_channels,
mask_classification=True,
*,
num_classes: int,
hidden_dim: int,
num_queries: int,
nheads: int,
dropout: float,
dim_feedforward: int,
enc_layers: int,
dec_layers: int,
pre_norm: bool,
deep_supervision: bool,
mask_dim: int,
enforce_input_project: bool,
):
"""
NOTE: this interface is experimental.
Args:
in_channels: channels of the input features
mask_classification: whether to add mask classifier or not
num_classes: number of classes
hidden_dim: Transformer feature dimension
num_queries: number of queries
nheads: number of heads
dropout: dropout in Transformer
dim_feedforward: feature dimension in feedforward network
enc_layers: number of Transformer encoder layers
dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
            deep_supervision: whether to add supervision to every decoder layer
mask_dim: mask feature dimension
enforce_input_project: add input project 1x1 conv even if input
                channels and hidden dim are identical
"""
super().__init__()
self.mask_classification = mask_classification
# positional encoding
N_steps = hidden_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
transformer = Transformer(
d_model=hidden_dim,
dropout=dropout,
nhead=nheads,
dim_feedforward=dim_feedforward,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
normalize_before=pre_norm,
return_intermediate_dec=deep_supervision,
)
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.query_embed = nn.Embedding(num_queries, hidden_dim)
if in_channels != hidden_dim or enforce_input_project:
self.input_proj = Conv2d(in_channels, hidden_dim, kernel_size=1)
weight_init.c2_xavier_fill(self.input_proj)
else:
self.input_proj = nn.Sequential()
self.aux_loss = deep_supervision
# output FFNs
if self.mask_classification:
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
@classmethod
def from_config(cls, cfg, in_channels, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["mask_classification"] = mask_classification
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS
ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS
ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
return ret
def forward(self, x, mask_features, mask=None):
if mask is not None:
mask = F.interpolate(mask[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
pos = self.pe_layer(x, mask)
src = x
hs, memory = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos)
if self.mask_classification:
outputs_class = self.class_embed(hs)
out = {"pred_logits": outputs_class[-1]}
else:
out = {}
if self.aux_loss:
# [l, bs, queries, embed]
mask_embed = self.mask_embed(hs)
outputs_seg_masks = torch.einsum("lbqc,bchw->lbqhw", mask_embed, mask_features)
out["pred_masks"] = outputs_seg_masks[-1]
out["aux_outputs"] = self._set_aux_loss(
outputs_class if self.mask_classification else None, outputs_seg_masks
)
else:
# FIXME h_boxes takes the last one computed, keep this in mind
# [bs, queries, embed]
mask_embed = self.mask_embed(hs[-1])
outputs_seg_masks = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
out["pred_masks"] = outputs_seg_masks
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_seg_masks):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
if self.mask_classification:
return [
{"pred_logits": a, "pred_masks": b}
for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
]
else:
return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
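# Illustrative shape sketch for MLP (assumption, for documentation only):
#
#   mlp = MLP(input_dim=256, hidden_dim=256, output_dim=256, num_layers=3)
#   x = torch.randn(8, 100, 256)   # (batch, queries, hidden_dim)
#   y = mlp(x)                     # -> (8, 100, 256); ReLU after every layer but the last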
| CutLER-main | videocutler/mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from .position_encoding import PositionEmbeddingSine
from .maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
class SelfAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
return self.forward_post(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
class CrossAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
class FFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt):
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt):
tgt2 = self.norm(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt):
if self.normalize_before:
return self.forward_pre(tgt)
return self.forward_post(tgt)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
@TRANSFORMER_DECODER_REGISTRY.register()
class MultiScaleMaskedTransformerDecoder(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
            # Do not warn if training from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "static_query" in k:
newk = k.replace("static_query", "query_feat")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
in_channels,
mask_classification=True,
*,
num_classes: int,
hidden_dim: int,
num_queries: int,
nheads: int,
dim_feedforward: int,
dec_layers: int,
pre_norm: bool,
mask_dim: int,
enforce_input_project: bool,
):
"""
NOTE: this interface is experimental.
Args:
in_channels: channels of the input features
mask_classification: whether to add mask classifier or not
num_classes: number of classes
hidden_dim: Transformer feature dimension
num_queries: number of queries
nheads: number of heads
dim_feedforward: feature dimension in feedforward network
            dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
mask_dim: mask feature dimension
            enforce_input_project: add an input projection (1x1 conv) even if the input
                channels and hidden dim are identical
"""
super().__init__()
assert mask_classification, "Only support mask classification model"
self.mask_classification = mask_classification
# positional encoding
N_steps = hidden_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
# define Transformer decoder here
self.num_heads = nheads
self.num_layers = dec_layers
self.transformer_self_attention_layers = nn.ModuleList()
self.transformer_cross_attention_layers = nn.ModuleList()
self.transformer_ffn_layers = nn.ModuleList()
for _ in range(self.num_layers):
self.transformer_self_attention_layers.append(
SelfAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_cross_attention_layers.append(
CrossAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_ffn_layers.append(
FFNLayer(
d_model=hidden_dim,
dim_feedforward=dim_feedforward,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.decoder_norm = nn.LayerNorm(hidden_dim)
self.num_queries = num_queries
# learnable query features
self.query_feat = nn.Embedding(num_queries, hidden_dim)
# learnable query p.e.
self.query_embed = nn.Embedding(num_queries, hidden_dim)
# level embedding (we always use 3 scales)
self.num_feature_levels = 3
self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
self.input_proj = nn.ModuleList()
for _ in range(self.num_feature_levels):
if in_channels != hidden_dim or enforce_input_project:
self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
weight_init.c2_xavier_fill(self.input_proj[-1])
else:
self.input_proj.append(nn.Sequential())
# output FFNs
if self.mask_classification:
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
@classmethod
def from_config(cls, cfg, in_channels, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["mask_classification"] = mask_classification
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
        # NOTE: the learnable query features also receive supervision, so we build one
        # fewer decoder layer than cfg.MODEL.MASK_FORMER.DEC_LAYERS. In the loss
        # implementation the number of auxiliary losses equals the number of decoder
        # layers actually built; with the extra prediction made from the learnable
        # queries, the total number of supervised predictions stays equal to DEC_LAYERS.
assert cfg.MODEL.MASK_FORMER.DEC_LAYERS >= 1
ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS - 1
ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
return ret
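    # Worked example of the DEC_LAYERS bookkeeping above (illustrative only): with
    # cfg.MODEL.MASK_FORMER.DEC_LAYERS = 10 this module builds 9 decoder layers; the
    # prediction made directly from the learnable query features adds one more, so
    # forward() returns 10 prediction sets in total and 9 of them become aux losses.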
def forward(self, x, mask_features, mask = None):
# x is a list of multi-scale feature
assert len(x) == self.num_feature_levels
src = []
pos = []
size_list = []
# disable mask, it does not affect performance
del mask
for i in range(self.num_feature_levels):
size_list.append(x[i].shape[-2:])
pos.append(self.pe_layer(x[i], None).flatten(2))
src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
# flatten NxCxHxW to HWxNxC
pos[-1] = pos[-1].permute(2, 0, 1)
src[-1] = src[-1].permute(2, 0, 1)
_, bs, _ = src[0].shape
# QxNxC
query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
predictions_class = []
predictions_mask = []
# prediction heads on learnable query features
outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0])
predictions_class.append(outputs_class)
predictions_mask.append(outputs_mask)
for i in range(self.num_layers):
level_index = i % self.num_feature_levels
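            # If a query's attention mask is True everywhere (it would attend to nothing
            # and the masked softmax would yield NaNs), un-mask that row so the query can
            # attend to all positions at this layer.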
attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
# attention: cross-attention first
output = self.transformer_cross_attention_layers[i](
output, src[level_index],
memory_mask=attn_mask,
memory_key_padding_mask=None, # here we do not apply masking on padded region
pos=pos[level_index], query_pos=query_embed
)
output = self.transformer_self_attention_layers[i](
output, tgt_mask=None,
tgt_key_padding_mask=None,
query_pos=query_embed
)
# FFN
output = self.transformer_ffn_layers[i](
output
)
outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels])
predictions_class.append(outputs_class)
predictions_mask.append(outputs_mask)
assert len(predictions_class) == self.num_layers + 1
out = {
'pred_logits': predictions_class[-1],
'pred_masks': predictions_mask[-1],
'aux_outputs': self._set_aux_loss(
predictions_class if self.mask_classification else None, predictions_mask
)
}
return out
def forward_prediction_heads(self, output, mask_features, attn_mask_target_size):
decoder_output = self.decoder_norm(output)
decoder_output = decoder_output.transpose(0, 1)
outputs_class = self.class_embed(decoder_output)
mask_embed = self.mask_embed(decoder_output)
outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
# NOTE: prediction is of higher-resolution
# [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW]
attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False)
# must use bool type
# If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
attn_mask = attn_mask.detach()
return outputs_class, outputs_mask, attn_mask
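    # Shape walkthrough of the attention mask built above (shapes illustrative):
    #   outputs_mask:      [B, Q, H, W] at the mask-feature resolution
    #   after interpolate: [B, Q, h, w] at attn_mask_target_size
    #   flatten + repeat:  [B*num_heads, Q, h*w]; entries with sigmoid < 0.5 become True,
    #   i.e. those key positions are NOT attended to by that query.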
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_seg_masks):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
if self.mask_classification:
return [
{"pred_logits": a, "pred_masks": b}
for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
]
else:
return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
| CutLER-main | videocutler/mask2former/modeling/transformer_decoder/mask2former_transformer_decoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x, mask=None):
if mask is None:
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
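    # Illustrative shape sketch (assumption, for documentation only):
    #
    #   pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
    #   feat = torch.zeros(2, 256, 32, 32)   # NxCxHxW feature map
    #   pos = pe(feat)                       # -> (2, 256, 32, 32), i.e. 2 * num_pos_feats channels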
def __repr__(self, _repr_indent=4):
head = "Positional encoding " + self.__class__.__name__
body = [
"num_pos_feats: {}".format(self.num_pos_feats),
"temperature: {}".format(self.temperature),
"normalize: {}".format(self.normalize),
"scale: {}".format(self.scale),
]
# _repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| CutLER-main | videocutler/mask2former/modeling/transformer_decoder/position_encoding.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from .maskformer_transformer_decoder import StandardTransformerDecoder
from .mask2former_transformer_decoder import MultiScaleMaskedTransformerDecoder
| CutLER-main | videocutler/mask2former/modeling/transformer_decoder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/transformer.py
"""
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
class Transformer(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
decoder_layer = TransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
if mask is not None:
mask = mask.flatten(1)
tgt = torch.zeros_like(query_embed)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
hs = self.decoder(
tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed
)
return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
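# Illustrative shape sketch for Transformer.forward (assumption, for documentation only):
#   src:         (bs, d_model, h, w) image features
#   query_embed: (num_queries, d_model) learnable query embeddings
#   pos_embed:   (bs, d_model, h, w) positional encodings matching src
# It returns hs of shape (num_decoder_layers, bs, num_queries, d_model) when
# return_intermediate_dec=True (a single stacked layer otherwise) and memory of
# shape (bs, d_model, h, w).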
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
output = src
for layer in self.layers:
output = layer(
output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(
q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(
q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
return self.forward_post(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
| CutLER-main | videocutler/mask2former/modeling/transformer_decoder/transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, DeformConv, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from ..transformer_decoder.transformer import TransformerEncoder, TransformerEncoderLayer, _get_clones, _get_activation_fn
def build_pixel_decoder(cfg, input_shape):
"""
    Build a pixel decoder from `cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME`.
"""
name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
forward_features = getattr(model, "forward_features", None)
if not callable(forward_features):
raise ValueError(
"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
f"Please implement forward_features for {name} to only return mask features."
)
return model
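# Illustrative usage sketch (not part of the original file; `cfg` and `backbone` are
# placeholders):
#
#   pixel_decoder = build_pixel_decoder(cfg, input_shape=backbone.output_shape())
#   # dispatches to the class registered under cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME,
#   # e.g. "BasePixelDecoder" or "MSDeformAttnPixelDecoder".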
# This is a modified FPN decoder.
@SEM_SEG_HEADS_REGISTRY.register()
class BasePixelDecoder(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
            conv_dim: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
feature_channels = [v.channels for k, v in input_shape]
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(feature_channels):
if idx == len(self.in_features) - 1:
output_norm = get_norm(norm, conv_dim)
output_conv = Conv2d(
in_channels,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(output_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(None)
output_convs.append(output_conv)
else:
lateral_norm = get_norm(norm, conv_dim)
output_norm = get_norm(norm, conv_dim)
lateral_conv = Conv2d(
in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.mask_dim = mask_dim
self.mask_features = Conv2d(
conv_dim,
mask_dim,
kernel_size=3,
stride=1,
padding=1,
)
weight_init.c2_xavier_fill(self.mask_features)
self.maskformer_num_feature_levels = 3 # always use 3 scales
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = {}
ret["input_shape"] = {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
}
ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM
return ret
def forward_features(self, features):
multi_scale_features = []
num_cur_levels = 0
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[::-1]):
x = features[f]
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
if lateral_conv is None:
y = output_conv(x)
else:
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
y = output_conv(y)
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(y)
num_cur_levels += 1
return self.mask_features(y), None, multi_scale_features
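    # Return-value sketch (explanatory note): forward_features yields a 3-tuple
    # (mask_features, transformer_encoder_features, multi_scale_features): a
    # (N, mask_dim, H', W') mask-feature map at the highest-resolution input feature
    # (typically stride 4 with the default res2-res5 features), None because this
    # decoder has no transformer encoder, and up to 3 conv_dim feature maps ordered
    # from low to high resolution for the transformer decoder.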
def forward(self, features, targets=None):
logger = logging.getLogger(__name__)
logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.")
return self.forward_features(features)
class TransformerEncoderOnly(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
if mask is not None:
mask = mask.flatten(1)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
return memory.permute(1, 2, 0).view(bs, c, h, w)
# This is a modified FPN decoder with extra Transformer encoder that processes the lowest-resolution feature map.
@SEM_SEG_HEADS_REGISTRY.register()
class TransformerEncoderPixelDecoder(BasePixelDecoder):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
transformer_pre_norm: bool,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_dropout: dropout probability in transformer
transformer_nheads: number of heads in transformer
transformer_dim_feedforward: dimension of feedforward network
transformer_enc_layers: number of transformer encoder layers
transformer_pre_norm: whether to use pre-layernorm or not
            conv_dim: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm)
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
in_channels = feature_channels[len(self.in_features) - 1]
self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1)
weight_init.c2_xavier_fill(self.input_proj)
self.transformer = TransformerEncoderOnly(
d_model=conv_dim,
dropout=transformer_dropout,
nhead=transformer_nheads,
dim_feedforward=transformer_dim_feedforward,
num_encoder_layers=transformer_enc_layers,
normalize_before=transformer_pre_norm,
)
N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
# update layer
use_bias = norm == ""
output_norm = get_norm(norm, conv_dim)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(output_conv)
delattr(self, "layer_{}".format(len(self.in_features)))
self.add_module("layer_{}".format(len(self.in_features)), output_conv)
self.output_convs[0] = output_conv
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = super().from_config(cfg, input_shape)
ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret[
"transformer_enc_layers"
] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config
ret["transformer_pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
return ret
def forward_features(self, features):
multi_scale_features = []
num_cur_levels = 0
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[::-1]):
x = features[f]
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
if lateral_conv is None:
transformer = self.input_proj(x)
pos = self.pe_layer(x)
transformer = self.transformer(transformer, None, pos)
y = output_conv(transformer)
# save intermediate feature as input to Transformer decoder
transformer_encoder_features = transformer
else:
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
y = output_conv(y)
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(y)
num_cur_levels += 1
return self.mask_features(y), transformer_encoder_features, multi_scale_features
def forward(self, features, targets=None):
logger = logging.getLogger(__name__)
logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.")
return self.forward_features(features)
| CutLER-main | videocutler/mask2former/modeling/pixel_decoder/fpn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| CutLER-main | videocutler/mask2former/modeling/pixel_decoder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from ..transformer_decoder.transformer import _get_clones, _get_activation_fn
from .ops.modules import MSDeformAttn
# MSDeformAttn Transformer encoder in deformable detr
class MSDeformAttnTransformerEncoderOnly(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, dim_feedforward=1024, dropout=0.1,
activation="relu",
num_feature_levels=4, enc_n_points=4,
):
super().__init__()
self.d_model = d_model
self.nhead = nhead
encoder_layer = MSDeformAttnTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points)
self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
normal_(self.level_embed)
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
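    # Explanatory note (not in the original file): valid_ratio is the per-image fraction
    # of each feature map that is not padding, along W and H respectively. The masks
    # built in forward() below are all False (no padding), so the ratios are 1.0 here;
    # the helper presumably remains for parity with the original Deformable DETR encoder.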
def forward(self, srcs, pos_embeds):
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs]
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
return memory, spatial_shapes, level_start_index
class MSDeformAttnTransformerEncoderLayer(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4):
super().__init__()
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
# self attention
src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class MSDeformAttnTransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
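    # Explanatory note (not in the original file): for every feature level this yields the
    # normalized (x, y) centers of all spatial locations, scaled by the per-image valid
    # ratios, i.e. reference_points of shape (bs, sum_l H_l*W_l, num_levels, 2) around
    # which MSDeformAttn samples its deformable offsets.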
def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
output = src
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
for _, layer in enumerate(self.layers):
output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
return output
@SEM_SEG_HEADS_REGISTRY.register()
class MSDeformAttnPixelDecoder(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
# deformable transformer encoder args
transformer_in_features: List[str],
common_stride: int,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_dropout: dropout probability in transformer
transformer_nheads: number of heads in transformer
transformer_dim_feedforward: dimension of feedforward network
transformer_enc_layers: number of transformer encoder layers
            conv_dim: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__()
transformer_input_shape = {
k: v for k, v in input_shape.items() if k in transformer_in_features
}
# this is the input shape of pixel decoder
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
self.feature_strides = [v.stride for k, v in input_shape]
self.feature_channels = [v.channels for k, v in input_shape]
        # this is the input shape of the transformer encoder (it may use fewer features than the pixel decoder)
transformer_input_shape = sorted(transformer_input_shape.items(), key=lambda x: x[1].stride)
self.transformer_in_features = [k for k, v in transformer_input_shape] # starting from "res2" to "res5"
transformer_in_channels = [v.channels for k, v in transformer_input_shape]
self.transformer_feature_strides = [v.stride for k, v in transformer_input_shape] # to decide extra FPN layers
self.transformer_num_feature_levels = len(self.transformer_in_features)
if self.transformer_num_feature_levels > 1:
input_proj_list = []
# from low resolution to high resolution (res5 -> res2)
for in_channels in transformer_in_channels[::-1]:
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
))
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(transformer_in_channels[-1], conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
)])
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
self.transformer = MSDeformAttnTransformerEncoderOnly(
d_model=conv_dim,
dropout=transformer_dropout,
nhead=transformer_nheads,
dim_feedforward=transformer_dim_feedforward,
num_encoder_layers=transformer_enc_layers,
num_feature_levels=self.transformer_num_feature_levels,
)
N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
self.mask_dim = mask_dim
# use 1x1 conv instead
self.mask_features = Conv2d(
conv_dim,
mask_dim,
kernel_size=1,
stride=1,
padding=0,
)
weight_init.c2_xavier_fill(self.mask_features)
self.maskformer_num_feature_levels = 3 # always use 3 scales
self.common_stride = common_stride
# extra fpn levels
stride = min(self.transformer_feature_strides)
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]):
lateral_norm = get_norm(norm, conv_dim)
output_norm = get_norm(norm, conv_dim)
lateral_conv = Conv2d(
in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = {}
ret["input_shape"] = {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
}
ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM
ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
# ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret["transformer_dim_feedforward"] = 1024 # use 1024 for deformable transformer encoder
ret[
"transformer_enc_layers"
] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config
ret["transformer_in_features"] = cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES
ret["common_stride"] = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE
return ret
@autocast(enabled=False)
def forward_features(self, features):
srcs = []
pos = []
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.transformer_in_features[::-1]):
x = features[f].float() # deformable detr does not support half precision
srcs.append(self.input_proj[idx](x))
pos.append(self.pe_layer(x))
y, spatial_shapes, level_start_index = self.transformer(srcs, pos)
bs = y.shape[0]
split_size_or_sections = [None] * self.transformer_num_feature_levels
for i in range(self.transformer_num_feature_levels):
if i < self.transformer_num_feature_levels - 1:
split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
else:
split_size_or_sections[i] = y.shape[1] - level_start_index[i]
y = torch.split(y, split_size_or_sections, dim=1)
out = []
multi_scale_features = []
num_cur_levels = 0
for i, z in enumerate(y):
out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
# append `out` with extra FPN levels
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[:self.num_fpn_levels][::-1]):
x = features[f].float()
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False)
y = output_conv(y)
out.append(y)
for o in out:
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(o)
num_cur_levels += 1
return self.mask_features(out[-1]), out[0], multi_scale_features
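# Return-value sketch (explanatory note): forward_features yields
# (mask_features, transformer_encoder_features, multi_scale_features): a per-pixel
# mask_dim embedding map at the highest FPN resolution (stride common_stride), the
# lowest-resolution deformable-encoder output, and 3 conv_dim feature maps ordered
# from low to high resolution for the transformer decoder.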
| CutLER-main | videocutler/mask2former/modeling/pixel_decoder/msdeformattn.py |