python_code
stringlengths 0
4.04M
| repo_name
stringlengths 8
58
| file_path
stringlengths 5
147
|
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import os
from utils import file_tqdm, get_dfs, separate_dps
logging.basicConfig(level=logging.INFO)
def get_leaf_info(ast):
leaf_tokens = []
leaf_ids = []
for i, node in enumerate(ast):
if "value" in node:
leaf_ids.append(i)
leaf_tokens.append(node["value"])
return leaf_tokens, leaf_ids
def get_dps(ast, max_len):
leaf_tokens, leaf_ids = get_leaf_info(ast)
if len(leaf_tokens) <= max_len:
return [[leaf_tokens, 0]], [leaf_ids]
half_len = int(max_len / 2)
aug_tokens = [[leaf_tokens[:max_len], 0]]
aug_leaf_ids = [leaf_ids[:max_len]]
i = half_len
while i < len(leaf_tokens) - max_len:
aug_tokens.append([leaf_tokens[i : i + max_len], half_len])
aug_leaf_ids.append(leaf_ids[i : i + max_len])
i += half_len
idx = max_len - (len(leaf_tokens) - (i + half_len))
aug_tokens.append([leaf_tokens[-max_len:], idx])
aug_leaf_ids.append(leaf_ids[-max_len:])
return aug_tokens, aug_leaf_ids
def main():
parser = argparse.ArgumentParser(description="Generate datapoints from AST")
parser.add_argument("--ast_fp", "-a", help="Filepath with the ASTs to be parsed")
parser.add_argument(
"--n_ctx", "-c", type=int, default=1000, help="Number of contexts for each dp"
)
parser.add_argument(
"--out_fp", "-o", default="/tmp/dps.txt", help="Filepath with the output dps"
)
args = parser.parse_args()
if os.path.exists(args.out_fp):
os.remove(args.out_fp)
logging.info("Writing dps to: {}".format(args.out_fp))
num_dps = 0
with open(args.ast_fp, "r") as f, open(args.out_fp, "w") as fout:
for line in file_tqdm(f):
dp = json.loads(line.strip())
aug_tokens, aug_leaf_ids = get_dps(dp, args.n_ctx)
for (tokens, ext), leaf in zip(aug_tokens, aug_leaf_ids):
if len(tokens) > 1:
json.dump([tokens, ext], fp=fout)
fout.write("\n")
num_dps += 1
logging.info("Wrote {} datapoints to {}".format(num_dps, args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | models/path_trans_variation/generate_data.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import pickle
import re
from collections import Counter
from utils import get_terminal_nodes, file_tqdm, tokenize
logging.basicConfig(level=logging.INFO)
UNK = "<unk_token>"
PAD = "<pad_token>"
PLACEHOLDER = "<placeholder_token>"
def get_value(line, vocab_type):
if vocab_type == "token":
return get_dfs(line)
elif vocab_type == "subtoken":
lst = []
for node in get_terminal_nodes(line):
lst += tokenize(node)
return lst
elif vocab_type == "output":
return get_terminal_nodes(line)
def main():
parser = argparse.ArgumentParser(
description="Create vocab for code2seq model for py150 dataset"
)
parser.add_argument("--n_vocab", "-n", type=int, default=100000)
parser.add_argument("--input_fp", "-i")
parser.add_argument("--out_fp", "-o", default="/tmp/vocab.pkl")
parser.add_argument(
"--vocab_type",
"-v",
choices=["token", "subtoken", "output"],
help="What type of vocab to get",
)
args = parser.parse_args()
logging.info("Reading from: {}".format(args.input_fp))
logging.info("Vocab type: {}".format(args.vocab_type))
vocab = Counter()
with open(args.input_fp, "r") as f:
for line in file_tqdm(f):
vocab.update(get_value(json.loads(line.strip()), args.vocab_type))
vocab_to_keep = [i[0] for i in vocab.most_common(args.n_vocab)]
top_total = sum(i[1] for i in vocab.most_common(args.n_vocab))
total = sum(vocab.values())
logging.info("Total # of vocab: {}".format(len(vocab)))
logging.info(
"Using {} top vocab covers: {:.2f}% of the entire dataset".format(
args.n_vocab, 100 * top_total / total
)
)
logging.info("Top 10 most common vocab:")
for v, i in vocab.most_common(10):
print(v, i)
# add unk and pad tokens
vocab_to_keep.append(UNK)
vocab_to_keep.append(PAD)
vocab_to_keep.append(PLACEHOLDER)
logging.info("Added {} and {} and {}".format(UNK, PAD, PLACEHOLDER))
# dump vocab to file
with open(args.out_fp, "wb") as fout:
pickle.dump(vocab_to_keep, fout)
logging.info("Wrote {} vocab to: {}".format(len(vocab_to_keep), args.out_fp))
if __name__ == "__main__":
main() | code-prediction-transformer-main | models/code2seq/generate_vocab.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import torch
import utils
logging.basicConfig(level=logging.INFO)
UNK = "<unk_token>"
PAD = "<pad_token>"
PLACEHOLDER = "<placeholder_token>"
class Dataset(torch.utils.data.Dataset):
def __init__(self, fp):
super().__init__()
self.fp = fp
self._line_pos_dp = list(utils.line_positions(fp))
def __len__(self):
return len(self._line_pos_dp)
def __getitem__(self, idx):
line_pos = self._line_pos_dp[idx]
with open(self.fp) as f:
f.seek(line_pos)
dp_line = json.loads(f.readline().strip())
return dp_line
@staticmethod
def collate(batch, token_pad_idx, subtoken_pad_idx):
def combine(seqs, max_len, max_path_len, pad_idx):
if not seqs:
return torch.ones((max_len, max_path_len)).long() * pad_idx
paths = []
for path in seqs:
paths.append(path + [pad_idx] * (max_path_len - len(path)))
len_pad = torch.ones((max_len - len(paths), max_path_len)).long()
return torch.cat((torch.tensor(paths), len_pad))
max_len = max(len(i[1]) for i in batch)
max_start_len = max(
max([len(start) for start in seq[1]], default=0) for seq in batch
)
max_path_len = max(
max([len(path) for path in seq[2]], default=0) for seq in batch
)
max_end_len = max(
max([len(start) for start in seq[3]], default=0) for seq in batch
)
all_targets = []
all_starts = []
all_paths = []
all_ends = []
for (target, starts, paths, ends) in batch:
all_targets.append(target)
starts = combine(starts, max_len, max_start_len, subtoken_pad_idx)
paths = combine(paths, max_len, max_path_len, token_pad_idx)
ends = combine(ends, max_len, max_end_len, subtoken_pad_idx)
all_starts.append(starts)
all_ends.append(ends)
all_paths.append(paths)
results = {
"targets": torch.tensor(all_targets),
"starts": torch.stack(all_starts),
"paths": torch.stack(all_paths),
"ends": torch.stack(all_ends),
}
return results
| code-prediction-transformer-main | models/code2seq/dataset.py |
import argparse
import json
import os
import pickle
import random
import re
from collections import defaultdict
from itertools import chain, combinations, product
from utils import get_ancestors, get_terminal_nodes, parallelize, tokenize
from tqdm import tqdm
PLACEHOLDER = "<placeholder_token>"
UNK = "<unk_token>"
def get_leaf_nodes(ast, id_type):
# get ids for special leaf types: attr, num, name, param
if id_type == "attr":
types_ = {"attr"}:
elif id_type == "num":
types_ = {"Num"}
elif id_type == "name":
types_ = {"NameLoad", "NameStore"}
elif id_type == "param":
types_ = {"NameParam"}
nodes = []
for i, node in enumerate(ast):
if "type" in node and node["type"] in types_:
nodes.append(i + 1)
return nodes
def get_value(d):
return d["value"] if "value" in d else d["type"]
def extract_paths(ast, max_length):
def dfs(i):
node = ast[i]
if "children" not in node:
full_paths = []
half_paths = [[i]]
else:
children = node["children"]
child_to_full_paths, child_to_half_paths = zip(
*(dfs(child_id) for child_id in children)
)
full_paths = list(chain.from_iterable(child_to_full_paths))
for i_child in range(len(children) - 1):
for j_child in range(i_child + 1, len(children)):
i_child_half_paths = child_to_half_paths[i_child]
j_child_half_paths = child_to_half_paths[j_child]
for i_half_path, j_half_path in product(
i_child_half_paths, j_child_half_paths
):
path_len = len(i_half_path) + len(j_half_path) + 1
if path_len > max_length:
continue
path = list(chain(i_half_path, [i], reversed(j_half_path)))
full_paths.append(path)
half_paths = [
half_path + [i]
for half_path in chain.from_iterable(child_to_half_paths)
if len(half_path) + 1 < max_length
]
return full_paths, half_paths
return dfs(0)[0]
def get_all_paths(ast, id_type, max_path_len, max_num_paths):
if id_type == "leaves":
nodes = get_terminal_nodes(ast)
else:
nodes = get_leaf_nodes(ast, id_type)
if not nodes:
return []
all_paths = extract_paths(ast, max_path_len)
ast_values = [get_value(i) for i in ast]
terminal_words = [get_value(ast[i]) for i in get_terminal_nodes(ast)]
tokenized_words = {word: tokenize(word) for word in terminal_words}
node_to_path_idx = {i: [] for i in range(len(ast))}
for i, path in enumerate(all_paths):
node_to_path_idx[path[-1]].append(i)
dps = []
paths_to_choose_from = []
prev_node = 0
for node in nodes:
for j in range(prev_node, node):
paths_to_choose_from += [
all_paths[path_i] for path_i in node_to_path_idx[j]
]
prev_node = node
paths_to_here = [all_paths[path_i] for path_i in node_to_path_idx[node]]
if len(paths_to_choose_from) + len(paths_to_here) <= max_num_paths:
paths = paths_to_choose_from.copy() + paths_to_here
else:
if len(paths_to_here) > max_num_paths:
paths = random.sample(paths_to_here, max_num_paths)
else:
paths = paths_to_here + random.sample(
paths_to_choose_from, max_num_paths - len(paths_to_here)
)
# convert to vocab
target = ast_values[node]
paths = [
[ast_values[i] if i != node else PLACEHOLDER for i in p] for p in paths
]
lefts = [tokenized_words[p[0]] for p in paths]
rights = [
tokenized_words[p[-1]] if p[-1] != PLACEHOLDER else [PLACEHOLDER]
for p in paths
]
dps.append([target, lefts, paths, rights])
return dps
def get_word2idx(out_fp):
with open(out_fp, "rb") as fin:
vocab = pickle.load(fin)
word2idx = {word: i for i, word in enumerate(vocab)}
word2idx = defaultdict(lambda: word2idx[UNK], word2idx)
print("Read vocab from: {}".format(out_fp))
return word2idx
def main():
parser = argparse.ArgumentParser(
description="Generate terminal to terminal paths from AST"
)
parser.add_argument("--ast_fp", "-a", help="Filepath with the ASTs to be parsed")
parser.add_argument(
"--out_fp", "-o", default="/tmp/dps.txt", help="Filepath for the output dps"
)
parser.add_argument("--max_path_len", type=int, default=9, help="Max path len.")
parser.add_argument("--max_num_paths", type=int, default=200)
parser.add_argument("--base_dir", "-b", type=str)
parser.add_argument(
"id_type",
choices=["attr", "num", "name", "param", "leaves"],
default="attr",
help="Which ids to generate. Default = attr",
)
args = parser.parse_args()
print("Max path len: {}".format(args.max_path_len))
print("Max num paths: {}".format(args.max_num_paths))
print("Writing to {}".format(args.out_fp))
# read the vocabs
base_dir = args.base_dir
token_vocab = get_word2idx(os.path.join(base_dir, "token_vocab.pkl"))
subtoken_vocab = get_word2idx(os.path.join(base_dir, "subtoken_vocab.pkl"))
output_vocab = get_word2idx(os.path.join(base_dir, "output_vocab.pkl"))
data = []
i = 0
c = 0
with open(args.ast_fp, "r") as f, open(args.out_fp, "w") as fout:
for _ in range(20):
i += 1
print("Starting {} / 50".format(i))
for _ in range(5000):
dp = json.loads(f.readline().strip())
if len(dp) <= 1:
continue
data.append(dp)
print(" > Finished reading: {}".format(len(data)))
for ast in tqdm(data):
dp = get_all_paths(ast, args.id_type, args.max_path_len, args.max_num_paths)
for target, lefts, paths, rights in dp:
target = output_vocab[target]
lefts = [[subtoken_vocab[t] for t in lst] for lst in lefts]
paths = [[token_vocab[t] for t in lst] for lst in paths]
rights = [[subtoken_vocab[t] for t in lst] for lst in rights]
json.dump([target, lefts, paths, rights], fout)
fout.write("\n")
c += 1
data = []
print(" > Finished writing to file")
print("Wrote {} datapoints to {}".format(c, args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | models/code2seq/generate_data.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from dataset.dataset import BaseDataset, BaseSetup, BaseVocab
class Setup(BaseSetup):
def _create_vocab(self):
return Vocab(self.filepaths["vocab"])
def _create_dataset(self, fp, ids_fp):
return Dataset(fp, ids_fp)
class Vocab(BaseVocab):
def convert(self, line):
dp, ext = line
dp_conv = [
self.vocab2idx[token] if token in self.vocab2idx else self.unk_idx
for token in dp
]
return [dp_conv, ext]
class Dataset(BaseDataset):
@staticmethod
def collate(seqs, pad_idx):
max_len = max(len(seq[0][0]) for seq in seqs)
max_len = max(max_len, 2)
input_seqs = []
target_seqs = []
extended = []
ids = {name: [] for name in seqs[0][1].keys()}
for i, ((seq, ext), ids_lst) in enumerate(seqs):
padding = [pad_idx] * (max_len - len(seq))
input_seqs.append(seq[:-1] + padding)
target_seqs.append(seq[1:] + padding)
extended.append(ext)
for name, lst in ids_lst.items():
ids[name] += [j - 1 + (max_len - 1) * i for j in lst]
return {
"input_seq": torch.tensor(input_seqs),
"target_seq": torch.tensor(target_seqs),
"extended": torch.tensor(extended),
"ids": ids,
}
| code-prediction-transformer-main | models/seq/dataset.py |
#!/usr/bin/env python2
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import json
import logging
import os
from collections import namedtuple
import six
from utils import file_tqdm, separate_dps
from models.source_code.astunparser import Unparser
SrcASTToken = namedtuple("SrcASTToken", "text type")
logging.basicConfig(level=logging.INFO)
def get_leaf_ids(types_):
ids = {"leaf_ids": []}
for i, v in enumerate(types_):
if v is not None:
ids["leaf_ids"].append(i)
return ids
def get_value_ids(types_):
ids = {"attr_ids": [], "num_ids": [], "name_ids": [], "param_ids": []}
for i, v in enumerate(types_):
if v == "attr":
ids["attr_ids"].append(i)
elif v == "Num":
ids["num_ids"].append(i)
elif v in {"NameStore", "NameLoad"}:
ids["name_ids"].append(i)
elif v == "NameParam":
ids["param_ids"].append(i)
return ids
class MyListFile(list):
def write(self, text, type=None):
text = text.strip()
if len(text) > 0:
self.append(SrcASTToken(text, type))
def flush(self):
pass
def transpose(self, max_len):
tokens = [tt.text for tt in self]
types_ = [tt.type for tt in self]
return separate_dps(tokens, max_len), separate_dps(types_, max_len)
def my_tokenize(code_str, n_ctx):
t = ast.parse(code_str)
lst = MyListFile()
Unparser(t, lst)
return lst.transpose(n_ctx)
def main():
parser = argparse.ArgumentParser(description="Generate datapoints from source code")
parser.add_argument(
"--files_fp", "-f", help="Filepath with the filenames to be parsed"
)
parser.add_argument(
"--out_fp", "-o", default="/tmp/dps.txt", help="Filepath with the output dps"
)
parser.add_argument("--base_dir", "-b", help="Base dir to append for the fps")
parser.add_argument(
"--n_ctx", "-c", type=int, default=1000, help="Number of contexts for each dp"
)
parser.add_argument(
"id_type",
choices=["leaf", "value", "token", "all"],
default="",
help="Which ids to generate. Default = get the tokens",
)
args = parser.parse_args()
if os.path.exists(args.out_fp):
os.remove(args.out_fp)
logging.info("Number of context: {}".format(args.n_ctx))
num_dps = 0
logging.info("Loading files from: {}".format(args.base_dir))
with open(args.files_fp, "r") as f, open(args.out_fp, "w") as fout:
for line in file_tqdm(f):
fp = os.path.join(args.base_dir, line.strip())
try:
aug_tokens, aug_types = my_tokenize(open(fp).read(), args.n_ctx)
for (tokens, ext), (types_, _) in zip(aug_tokens, aug_types):
if len(tokens) > 1:
if args.id_type == "leaf":
json.dump(get_leaf_ids(types_), fp=fout)
elif args.id_type == "value":
json.dump(get_value_ids(types_), fp=fout)
elif args.id_type == "all":
ids = get_leaf_ids(types_)
ids.update(get_value_ids(types_))
json.dump(ids, fp=fout)
else:
json.dump([tokens, ext], fp=fout)
fout.write("\n")
num_dps += 1
except:
continue
logging.info("Wrote {} datapoints to {}".format(num_dps, args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | models/seq/generate_data.py |
#!/usr/bin/env python2
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import ast
import sys
import six
from six import StringIO
# Large float and imaginary literals get turned into infinities in the AST.
# We unparse those infinities to INFSTR.
INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
def interleave(inter, f, seq):
"""Call f on each item in seq, calling inter() in between.
"""
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x)
class Unparser:
"""Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarded. """
def __init__(self, tree, file=sys.stdout):
"""Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file."""
self.f = file
self.future_imports = []
self._indent = 0
self.dispatch(tree)
self.f.write("\n")
# print("", file=self.f)
self.f.flush()
def fill(self, text=""):
"Indent a piece of text, according to the current indentation level"
self.f.write("\n" + " " * self._indent + text)
def write(self, text, type=None):
"Append a piece of text to the current line."
self.f.write(six.text_type(text), type)
def enter(self):
"Print ':', and increase the indentation."
self.write(":")
self._indent += 1
def leave(self):
"Decrease the indentation level."
self._indent -= 1
def dispatch(self, tree):
"Dispatcher function, dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, "_" + tree.__class__.__name__)
meth(tree)
############### Unparsing methods ######################
# There should be one method per concrete grammar type #
# Constructors should be grouped by sum type. Ideally, #
# this would follow the order in the grammar, but #
# currently doesn't. #
########################################################
def _Module(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
def _Interactive(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
def _Expression(self, tree):
self.dispatch(tree.body)
# stmt
def _Expr(self, tree):
self.fill()
self.dispatch(tree.value)
def _NamedExpr(self, tree):
self.write("(")
self.dispatch(tree.target)
self.write(" := ")
self.dispatch(tree.value)
self.write(")")
def _Import(self, t):
self.fill("import ")
interleave(lambda: self.write(", "), self.dispatch, t.names)
def _ImportFrom(self, t):
# A from __future__ import may affect unparsing, so record it.
if t.module and t.module == "__future__":
self.future_imports.extend(n.name for n in t.names)
self.fill("from ")
self.write("." * t.level)
# NOTE: `level` is not stored as values in py150 trees.
# This will make `from ..package.name import sth` appear the same as
# `from .package.name import sth` or `from package.name import sth`.
if t.module:
# NOTE: Reason: Use class name. `parse_python.py:L66-69`.
self.write(t.module, type="ImportFrom")
self.write(" import ")
interleave(lambda: self.write(", "), self.dispatch, t.names)
def _Assign(self, t):
self.fill()
for target in t.targets:
self.dispatch(target)
self.write(" = ")
self.dispatch(t.value)
def _AugAssign(self, t):
self.fill()
self.dispatch(t.target)
self.write(" ")
self.write(self.binop[t.op.__class__.__name__])
self.write("= ")
self.dispatch(t.value)
def _AnnAssign(self, t):
self.fill()
if not t.simple and isinstance(t.target, ast.Name):
self.write("(")
self.dispatch(t.target)
if not t.simple and isinstance(t.target, ast.Name):
self.write(")")
self.write(": ")
self.dispatch(t.annotation)
if t.value:
self.write(" = ")
self.dispatch(t.value)
def _Return(self, t):
self.fill("return")
if t.value:
self.write(" ")
self.dispatch(t.value)
def _Pass(self, t):
self.fill("pass")
def _Break(self, t):
self.fill("break")
def _Continue(self, t):
self.fill("continue")
def _Delete(self, t):
self.fill("del ")
interleave(lambda: self.write(", "), self.dispatch, t.targets)
def _Assert(self, t):
self.fill("assert ")
self.dispatch(t.test)
if t.msg:
self.write(", ")
self.dispatch(t.msg)
def _Exec(self, t):
self.fill("exec ")
self.dispatch(t.body)
if t.globals:
self.write(" in ")
self.dispatch(t.globals)
if t.locals:
self.write(", ")
self.dispatch(t.locals)
def _Print(self, t):
self.fill("print ")
do_comma = False
if t.dest:
self.write(">>")
self.dispatch(t.dest)
do_comma = True
for e in t.values:
if do_comma:
self.write(", ")
else:
do_comma = True
self.dispatch(e)
if not t.nl:
self.write(",")
def _Global(self, t):
self.fill("global ")
# NOTE: Reason: Use "identifier". `parse_python.py:L71`.
interleave(lambda: self.write(", "), lambda x: self.write(x, type="identifier"), t.names)
def _Nonlocal(self, t):
# NOTE: This is not part of PY2.
# NOTE: Reason: Use "identifier". Similar to the case of "Global".
self.fill("nonlocal ")
interleave(lambda: self.write(", "), lambda x: self.write(x, type="identifier"), t.names)
def _Await(self, t):
self.write("(")
self.write("await")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _Yield(self, t):
self.write("(")
self.write("yield")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _YieldFrom(self, t):
self.write("(")
self.write("yield from")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _Raise(self, t):
self.fill("raise")
if six.PY3:
if not t.exc:
assert not t.cause
return
self.write(" ")
self.dispatch(t.exc)
if t.cause:
self.write(" from ")
self.dispatch(t.cause)
else:
self.write(" ")
if t.type:
self.dispatch(t.type)
if t.inst:
self.write(", ")
self.dispatch(t.inst)
if t.tback:
self.write(", ")
self.dispatch(t.tback)
def _Try(self, t):
self.fill("try")
self.enter()
self.dispatch(t.body)
self.leave()
for ex in t.handlers:
self.dispatch(ex)
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
if t.finalbody:
self.fill("finally")
self.enter()
self.dispatch(t.finalbody)
self.leave()
def _TryExcept(self, t):
self.fill("try")
self.enter()
self.dispatch(t.body)
self.leave()
for ex in t.handlers:
self.dispatch(ex)
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _TryFinally(self, t):
if len(t.body) == 1 and isinstance(t.body[0], ast.TryExcept):
# try-except-finally
self.dispatch(t.body)
else:
self.fill("try")
self.enter()
self.dispatch(t.body)
self.leave()
self.fill("finally")
self.enter()
self.dispatch(t.finalbody)
self.leave()
def _ExceptHandler(self, t):
self.fill("except")
if t.type:
self.write(" ")
self.dispatch(t.type)
if t.name:
self.write(" as ")
if six.PY3:
# NOTE: This is not part of PY2.
# NOTE: Reason: Use "identifier". Similar to the case of "Global".
self.write(t.name, type="identifier")
else:
self.dispatch(t.name)
self.enter()
self.dispatch(t.body)
self.leave()
def _ClassDef(self, t):
self.write("\n")
for deco in t.decorator_list:
self.fill("@")
self.dispatch(deco)
self.fill("class ")
# NOTE: Reason: Use class name. `parse_python.py:L64-65`.
self.write(t.name, type="ClassDef")
if six.PY3:
self.write("(")
comma = False
for e in t.bases:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
for e in t.keywords:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
if sys.version_info[:2] < (3, 5):
if t.starargs:
if comma:
self.write(", ")
else:
comma = True
self.write("*")
self.dispatch(t.starargs)
if t.kwargs:
if comma:
self.write(", ")
else:
comma = True
self.write("**")
self.dispatch(t.kwargs)
self.write(")")
elif t.bases:
self.write("(")
for a in t.bases:
self.dispatch(a)
self.write(", ")
self.write(")")
self.enter()
self.dispatch(t.body)
self.leave()
def _FunctionDef(self, t):
self.__FunctionDef_helper(t, "def")
def _AsyncFunctionDef(self, t):
self.__FunctionDef_helper(t, "async def")
def __FunctionDef_helper(self, t, fill_suffix):
self.write("\n")
for deco in t.decorator_list:
self.fill("@")
self.dispatch(deco)
self.fill(fill_suffix)
self.write(" ")
# NOTE: Reason: Use class name. `parse_python.py:L62-63`
self.write(t.name, type="FunctionDef")
self.write("(")
self.dispatch(t.args)
self.write(")")
if getattr(t, "returns", False):
self.write(" -> ")
self.dispatch(t.returns)
self.enter()
self.dispatch(t.body)
self.leave()
def _For(self, t):
self.__For_helper("for ", t)
def _AsyncFor(self, t):
self.__For_helper("async for ", t)
def __For_helper(self, fill, t):
self.fill(fill)
self.dispatch(t.target)
self.write(" in ")
self.dispatch(t.iter)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _If(self, t):
self.fill("if ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
# collapse nested ifs into equivalent elifs.
while t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If):
t = t.orelse[0]
self.fill("elif ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
# final else
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _While(self, t):
self.fill("while ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _generic_With(self, t, async_=False):
self.fill("async with " if async_ else "with ")
if hasattr(t, "items"):
interleave(lambda: self.write(", "), self.dispatch, t.items)
else:
self.dispatch(t.context_expr)
if t.optional_vars:
self.write(" as ")
self.dispatch(t.optional_vars)
self.enter()
self.dispatch(t.body)
self.leave()
def _With(self, t):
self._generic_With(t)
def _AsyncWith(self, t):
self._generic_With(t, async_=True)
# expr
def _Bytes(self, t):
# NOTE: This is not part of PY2 and will be removed in PY3.8+.
# NOTE: Reason: Use class name. Similar to the case of "Str".
self.write(repr(t.s), type="Bytes")
def _Str(self, tree):
# NOTE: This will be removed in PY3.8+.
# NOTE: Reason: Use class name. `parse_python.py:L56-57`.
if six.PY3:
self.write(repr(tree.s), type="Str")
else:
# NOTE: py150 nodes will keep string in value form, not repr form.
# We keep this part as it is to preserve consistency after training.
# -----
# if from __future__ import unicode_literals is in effect,
# then we want to output string literals using a 'b' prefix
# and unicode literals with no prefix.
if "unicode_literals" not in self.future_imports:
self.write(repr(tree.s), type="Str")
elif isinstance(tree.s, str):
self.write("b" + repr(tree.s), type="Str")
elif isinstance(tree.s, unicode):
self.write(repr(tree.s).lstrip("u"), type="Str")
else:
assert False, "shouldn't get here"
def _JoinedStr(self, t):
# NOTE: This is not part of PY2.
# JoinedStr(expr* values)
self.write("f")
string = StringIO()
self._fstring_JoinedStr(t, string.write)
# Deviation from `unparse.py`: Try to find an unused quote.
# This change is made to handle _very_ complex f-strings.
v = string.getvalue()
if "\n" in v or "\r" in v:
quote_types = ["'''", '"""']
else:
quote_types = ["'", '"', '"""', "'''"]
for quote_type in quote_types:
if quote_type not in v:
v = "{quote_type}{v}{quote_type}".format(quote_type=quote_type, v=v)
break
else:
v = repr(v)
# NOTE: Reason: Use class name. Similar to the case of "Str".
self.write(v, type="JoinedStr")
def _FormattedValue(self, t):
# NOTE: This is not part of PY2.
# FormattedValue(expr value, int? conversion, expr? format_spec)
self.write("f")
string = StringIO()
self._fstring_JoinedStr(t, string.write)
# NOTE: Reason: Use class name. Similar to the case of "Str".
self.write(repr(string.getvalue()), type="FormattedValue")
def _fstring_JoinedStr(self, t, write):
for value in t.values:
meth = getattr(self, "_fstring_" + type(value).__name__)
meth(value, write)
def _fstring_Str(self, t, write):
value = t.s.replace("{", "{{").replace("}", "}}")
write(value)
def _fstring_Constant(self, t, write):
assert isinstance(t.value, str)
value = t.value.replace("{", "{{").replace("}", "}}")
write(value)
def _fstring_FormattedValue(self, t, write):
write("{")
expr = StringIO()
Unparser(t.value, expr)
expr = expr.getvalue().rstrip("\n")
if expr.startswith("{"):
write(" ") # Separate pair of opening brackets as "{ {"
write(expr)
if t.conversion != -1:
conversion = chr(t.conversion)
assert conversion in "sra"
write("!{conversion}".format(conversion=conversion))
if t.format_spec:
write(":")
meth = getattr(self, "_fstring_" + type(t.format_spec).__name__)
meth(t.format_spec, write)
write("}")
def _Name(self, t):
# NOTE: PY2, PY3 grammar: Name(identifier id, expr_context ctx)
# NOTE: Reason: Use class name + context name. `parse_python.py:L125-127`.
# ETH parser merges the value of `expr_context` into its parent node.
# From [PY2 grammar](https://docs.python.org/2/library/ast.html#abstract-grammar):
# ```
# expr_context = Load | Store | Del | AugLoad | AugStore | Param
# ```
self.write(t.id, type="Name" + t.ctx.__class__.__name__)
def _NameConstant(self, t):
# NOTE: This is not part of PY2 and will be removed PY3.8+.
# NOTE: Use class name. Similar to the case of "str".
self.write(repr(t.value), type="NameConstant")
def _Repr(self, t):
self.write("`")
self.dispatch(t.value)
self.write("`")
def _write_constant(self, value):
# NOTE: Reason: Use class name. Similar to the case of "Str".
if isinstance(value, (float, complex)):
# Substitute overflowing decimal literal for AST infinities.
self.write(repr(value).replace("inf", INFSTR), type="Constant")
else:
self.write(repr(value), type="Constant")
def _Constant(self, t):
# NOTE: This is not part of PY2 and will be removed PY3.8+.
value = t.value
if isinstance(value, tuple):
self.write("(")
if len(value) == 1:
self._write_constant(value[0])
self.write(",")
else:
interleave(lambda: self.write(", "), self._write_constant, value)
self.write(")")
elif value is Ellipsis: # instead of `...` for Py2 compatibility
self.write("...")
else:
if t.kind == "u":
self.write("u")
self._write_constant(t.value)
def _Num(self, t):
# NOTE: Reason: Use class name. `parse_python.py:L54-55`.
# NOTE: Here we use `repr()` while `parse_python.py` uses `unicode()`.
# This causes disparity such as:
# | value | repr() | str() | unicode() | notes |
# | ----- | ------ | ----- | --------- | ----- |
# | 1L | '1L' | '1' | u'1' | long int |
# | 3e13 | '30000000000000.0' | '3e+13' | u'3e+13' | exponent notation |
# | 1254213006.517507 | '1254213006.517507' | '1254213006.52' | u'1254213006.52' | floating point precision |
# Here we keep the part as it is to preserve consistency.
repr_n = repr(t.n)
if six.PY3:
self.write(repr_n.replace("inf", INFSTR), type="Num")
else:
# Parenthesize negative numbers, to avoid turning (-1)**2 into -1**2.
if repr_n.startswith("-"):
self.write("(")
if "inf" in repr_n and repr_n.endswith("*j"):
repr_n = repr_n.replace("*j", "j")
# Substitute overflowing decimal literal for AST infinities.
self.write(repr_n.replace("inf", INFSTR), type="Num")
if repr_n.startswith("-"):
self.write(")")
def _List(self, t):
self.write("[")
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write("]")
def _ListComp(self, t):
self.write("[")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write("]")
def _GeneratorExp(self, t):
self.write("(")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write(")")
def _SetComp(self, t):
self.write("{")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write("}")
def _DictComp(self, t):
self.write("{")
self.dispatch(t.key)
self.write(": ")
self.dispatch(t.value)
for gen in t.generators:
self.dispatch(gen)
self.write("}")
def _comprehension(self, t):
if getattr(t, "is_async", False):
self.write(" async for ")
else:
self.write(" for ")
self.dispatch(t.target)
self.write(" in ")
self.dispatch(t.iter)
for if_clause in t.ifs:
self.write(" if ")
self.dispatch(if_clause)
def _IfExp(self, t):
self.write("(")
self.dispatch(t.body)
self.write(" if ")
self.dispatch(t.test)
self.write(" else ")
self.dispatch(t.orelse)
self.write(")")
def _Set(self, t):
assert t.elts # should be at least one element
self.write("{")
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write("}")
def _Dict(self, t):
self.write("{")
def write_key_value_pair(k, v):
self.dispatch(k)
self.write(": ")
self.dispatch(v)
def write_item(item):
k, v = item
if k is None:
# for dictionary unpacking operator in dicts {**{'y': 2}}
# see PEP 448 for details
self.write("**")
self.dispatch(v)
else:
write_key_value_pair(k, v)
interleave(lambda: self.write(", "), write_item, zip(t.keys, t.values))
self.write("}")
def _Tuple(self, t):
self.write("(")
if len(t.elts) == 1:
elt = t.elts[0]
self.dispatch(elt)
self.write(",")
else:
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(")")
unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}
def _UnaryOp(self, t):
self.write("(")
self.write(self.unop[t.op.__class__.__name__])
self.write(" ")
if six.PY2 and isinstance(t.op, ast.USub) and isinstance(t.operand, ast.Num):
# If we're applying unary minus to a number, parenthesize the number.
# This is necessary: -2147483648 is different from -(2147483648) on
# a 32-bit machine (the first is an int, the second a long), and
# -7j is different from -(7j). (The first has real part 0.0, the second
# has real part -0.0.)
self.write("(")
self.dispatch(t.operand)
self.write(")")
else:
self.dispatch(t.operand)
self.write(")")
binop = {
"Add": "+",
"Sub": "-",
"Mult": "*",
"MatMult": "@",
"Div": "/",
"Mod": "%",
"LShift": "<<",
"RShift": ">>",
"BitOr": "|",
"BitXor": "^",
"BitAnd": "&",
"FloorDiv": "//",
"Pow": "**",
}
def _BinOp(self, t):
self.write("(")
self.dispatch(t.left)
self.write(" ")
self.write(self.binop[t.op.__class__.__name__])
self.write(" ")
self.dispatch(t.right)
self.write(")")
cmpops = {
"Eq": "==",
"NotEq": "!=",
"Lt": "<",
"LtE": "<=",
"Gt": ">",
"GtE": ">=",
"Is": "is",
"IsNot": "is not",
"In": "in",
"NotIn": "not in",
}
def _Compare(self, t):
self.write("(")
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
self.write(" ")
self.write(self.cmpops[o.__class__.__name__])
self.write(" ")
self.dispatch(e)
self.write(")")
boolops = {ast.And: "and", ast.Or: "or"}
def _BoolOp(self, t):
self.write("(")
s = " %s " % self.boolops[t.op.__class__]
interleave(lambda: self.write(s), self.dispatch, t.values)
self.write(")")
def _Attribute(self, t):
self.dispatch(t.value)
# Special case: 3.__abs__() is a syntax error, so if t.value
# is an integer literal then we need to either parenthesize
# it or add an extra space to get 3 .__abs__().
if isinstance(
t.value, getattr(ast, "Constant", getattr(ast, "Num", None))
) and isinstance(t.value.n, int):
self.write(" ")
self.write(".")
# NOTE: Reason: Special case. `parse_python.py:L131-132`.
self.write(t.attr, type="attr")
def _Call(self, t):
self.dispatch(t.func)
self.write("(")
comma = False
for e in t.args:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
for e in t.keywords:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
if sys.version_info[:2] < (3, 5):
if t.starargs:
if comma:
self.write(", ")
else:
comma = True
self.write("*")
self.dispatch(t.starargs)
if t.kwargs:
if comma:
self.write(", ")
else:
comma = True
self.write("**")
self.dispatch(t.kwargs)
self.write(")")
def _Subscript(self, t):
self.dispatch(t.value)
self.write("[")
self.dispatch(t.slice)
self.write("]")
def _Starred(self, t):
self.write("*")
self.dispatch(t.value)
# slice
def _Ellipsis(self, t):
self.write("...")
def _Index(self, t):
self.dispatch(t.value)
def _Slice(self, t):
if t.lower:
self.dispatch(t.lower)
self.write(":")
if t.upper:
self.dispatch(t.upper)
if t.step:
self.write(":")
self.dispatch(t.step)
def _ExtSlice(self, t):
interleave(lambda: self.write(", "), self.dispatch, t.dims)
# argument
def _arg(self, t):
# NOTE: This is not part of PY2.
# NOTE: Reason: Use class name. Default behaviour of `parse_python.py`.
self.write(t.arg, type="arg")
if t.annotation:
self.write(": ")
self.dispatch(t.annotation)
# others
def _arguments(self, t):
# NOTE: PY2 grammar: arguments = (
# expr* args, identifier? vararg,
# identifier? kwarg, expr* defaults)
# NOTE: PY3.7 grammar: arguments = (
# arg* args, arg? vararg, arg* kwonlyargs,
# expr* kw_defaults, arg? kwarg, expr* defaults)
# NOTE: PY3.8 grammar: arguments = (
# arg* posonlyargs, arg* args, arg? vararg, arg* kwonlyargs,
# expr* kw_defaults, arg? kwarg, expr* defaults)
first = True
# normal arguments
# NOTE: `posonlyargs` is not part of PY2 and appears only starting PY3.8.
all_args = getattr(t, "posonlyargs", []) + t.args
defaults = [None] * (len(all_args) - len(t.defaults)) + t.defaults
for index, elements in enumerate(zip(all_args, defaults), 1):
a, d = elements
if first:
first = False
else:
self.write(", ")
self.dispatch(a)
if d:
self.write("=")
self.dispatch(d)
if index == len(getattr(t, "posonlyargs", ())):
self.write(", ")
self.write("/")
# varargs, or bare '*' if no varargs but keyword-only arguments present
# NOTE: `kwonlyargs` is not part of PY2.
if t.vararg or getattr(t, "kwonlyargs", False):
if first:
first = False
else:
self.write(", ")
self.write("*")
if t.vararg:
if hasattr(t.vararg, "arg"):
# NOTE: This is not part of PY2.
# NOTE: Reason: Special case. Following the case of "vararg".
self.write(t.vararg.arg, type="vararg")
if t.vararg.annotation:
self.write(": ")
self.dispatch(t.vararg.annotation)
else:
# NOTE: Reason: Special case. `parse_python.py:L105`.
self.write(t.vararg, type="vararg")
if getattr(t, "varargannotation", None):
self.write(": ")
self.dispatch(t.varargannotation)
# keyword-only arguments
if getattr(t, "kwonlyargs", False):
for a, d in zip(t.kwonlyargs, t.kw_defaults):
if first:
first = False
else:
self.write(", ")
self.dispatch(a),
if d:
self.write("=")
self.dispatch(d)
# kwargs
if t.kwarg:
if first:
first = False
else:
self.write(", ")
if hasattr(t.kwarg, "arg"):
# NOTE: This is not part of PY2.
self.write("**")
# NOTE: Reason: Special case. Following the case of "kwarg".
self.write(t.kwarg.arg, type="kwarg")
if t.kwarg.annotation:
self.write(": ")
self.dispatch(t.kwarg.annotation)
else:
self.write("**")
# NOTE: Reason: Special case. `parse_python.py:L107`.
self.write(t.kwarg, type="kwarg")
if getattr(t, "kwargannotation", None):
self.write(": ")
self.dispatch(t.kwargannotation)
def _keyword(self, t):
if t.arg is None:
# starting from Python 3.5 this denotes a kwargs part of the invocation
self.write("**")
else:
# NOTE: Reason: Use class name. `parse_python.py:L72-73`.
self.write(t.arg, type="keyword")
self.write("=")
self.dispatch(t.value)
def _Lambda(self, t):
self.write("(")
self.write("lambda ")
self.dispatch(t.args)
self.write(": ")
self.dispatch(t.body)
self.write(")")
def _alias(self, t):
# NOTE: Reason: Use class name. `parse_python.py:L59`.
self.write(t.name, type="alias")
if t.asname:
self.write(" as ")
# NOTE: Use "identifier". `parse_python.py:L61`.
self.write(t.asname, type="identifier")
def _withitem(self, t):
self.dispatch(t.context_expr)
if t.optional_vars:
self.write(" as ")
self.dispatch(t.optional_vars)
| code-prediction-transformer-main | models/seq/astunparser.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from dataset.dataset import BaseDataset, BaseSetup, BaseVocab
class Setup(BaseSetup):
def _create_vocab(self):
return Vocab(self.filepaths["vocab"])
def _create_dataset(self, fp, ids_fp):
return Dataset(fp, ids_fp)
class Vocab(BaseVocab):
def convert(self, line):
dp, ext, root_paths = line
dp_conv = [
self.vocab2idx[token] if token in self.vocab2idx else self.unk_idx
for token in dp
]
root_paths_conv = [
[
self.vocab2idx[token] if token in self.vocab2idx else self.unk_idx
for token in path
]
for path in root_paths
]
return [dp_conv, ext, root_paths_conv]
class Dataset(BaseDataset):
@staticmethod
def collate(seqs, pad_idx, bos_idx=None):
def combine_root_paths(root_paths, max_len, max_path_len):
paths = []
for path in root_paths:
paths.append(path + [pad_idx] * (max_path_len - len(path)))
len_pad = torch.ones((max_len - len(paths), max_path_len)).long()
return torch.cat((torch.tensor(paths), len_pad))
max_len = max(len(seq[0][0]) for seq in seqs)
max_len = max(2, max_len)
max_path_len = max(max(len(path) for path in seq[0][2]) for seq in seqs)
max_path_len - max(2, max_path_len)
input_seqs = []
target_seqs = []
extended = []
root_path_seqs = []
ids = {name: [] for name in seqs[0][1].keys()}
for i, ((seq, ext, root_paths), ids_lst) in enumerate(seqs):
padding = [pad_idx] * (max_len - len(seq))
input_seqs.append(seq[:-1] + padding)
target_seqs.append(seq[1:] + padding)
extended.append(ext)
root_path_seqs.append(combine_root_paths(root_paths, max_len, max_path_len))
for name, lst in ids_lst.items():
ids[name] += [j - 1 + (max_len - 1) * i for j in lst]
return {
"input_seq": torch.tensor(input_seqs),
"target_seq": torch.tensor(target_seqs),
"extended": torch.tensor(extended),
"root_paths": torch.stack(root_path_seqs)[:, 1:, :],
"ids": ids,
}
| code-prediction-transformer-main | models/path_trans/dataset.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import os
from utils import file_tqdm
logging.basicConfig(level=logging.INFO)
def get_leaf_info(ast):
leaf_tokens = []
leaf_ids = []
for i, node in enumerate(ast):
if "value" in node:
leaf_ids.append(i)
leaf_tokens.append(node["value"])
return leaf_tokens, leaf_ids
def get_ancestors(ast):
ancestors = {0: []}
node2parent = {0: 0}
for i, node in enumerate(ast):
if "children" in node:
for child in node["children"]:
node2parent[child] = i
token = node["value"] if "value" in node else node["type"]
ancestors[i] = [token] + ancestors[node2parent[i]]
return ancestors
def get_root_paths(ancestors, leaf_ids, max_path_len):
return [ancestors[i][1 :max_path_len + 1] for i in leaf_ids]
def get_dps(ast, max_len, max_path_len):
leaf_tokens, leaf_ids = get_leaf_info(ast)
ancestors = get_ancestors(ast)
if len(leaf_tokens) <= max_len:
return [[leaf_tokens, 0, get_root_paths(ancestors, leaf_ids, max_path_len)]]
half_len = int(max_len / 2)
aug_dps = [
[
leaf_tokens[:max_len],
0,
get_root_paths(ancestors, leaf_ids[:max_len], max_path_len),
]
]
i = half_len
while i < len(leaf_tokens) - max_len:
aug_dps.append(
[
leaf_tokens[i : i + max_len],
half_len,
get_root_paths(ancestors, leaf_ids[i : i + max_len], max_path_len),
]
)
i += half_len
idx = max_len - (len(leaf_tokens) - (i + half_len))
aug_dps.append(
[
leaf_tokens[-max_len:],
idx,
get_root_paths(ancestors, leaf_ids[-max_len:], max_path_len),
]
)
return aug_dps
def main():
parser = argparse.ArgumentParser(description="Generate datapoints from AST")
parser.add_argument("--ast_fp", "-a", help="Filepath with the ASTs to be parsed")
parser.add_argument(
"--out_fp", "-o", default="/tmp/dps.txt", help="Filepath with the output dps"
)
parser.add_argument(
"--n_ctx", "-c", type=int, default=1000, help="Number of contexts for each dp"
)
parser.add_argument(
"--max_path_len",
"-p",
type=int,
default=13,
help="Max length of rootpath route",
)
args = parser.parse_args()
if os.path.exists(args.out_fp):
os.remove(args.out_fp)
logging.info("Writing dps to: {}".format(args.out_fp))
num_dps = 0
with open(args.ast_fp, "r") as f, open(args.out_fp, "w") as fout:
for line in file_tqdm(f):
dp = json.loads(line.strip())
for dp in get_dps(dp, args.n_ctx, args.max_path_len):
if len(dp[0]) > 1:
json.dump(dp, fout)
fout.write("\n")
num_dps += 1
logging.info("Wrote {} datapoints to {}".format(num_dps, args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | models/path_trans/generate_data.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import os
from utils import file_tqdm, separate_dps
logging.basicConfig(level=logging.INFO)
def get_leaf_ids(ast):
ids = {"leaf_ids": [], "internal_ids": []}
for i, node in enumerate(ast):
if "value" in node:
ids["leaf_ids"].append(i)
else:
ids["internal_ids"].append(i)
return ids
def get_value_ids(ast):
ids = {"attr_ids": [], "num_ids": [], "name_ids": [], "param_ids": []}
for i, node in enumerate(ast):
if "type" in node:
if node["type"] == "attr":
ids["attr_ids"].append(
i + 1
) # + 1 since i is the type, and we want the value
elif node["type"] == "Num":
ids["num_ids"].append(i + 1)
elif node["type"] in {"NameLoad", "NameStore"}:
ids["name_ids"].append(i + 1)
elif node["type"] == "NameParam":
ids["param_ids"].append(i + 1)
return ids
def get_type_ids(ast):
ids = {
"call_ids": [],
"assign_ids": [],
"return_ids": [],
"list_ids": [],
"dict_ids": [],
"raise_ids": [],
}
for i, node in enumerate(ast):
if "type" in node:
type_ = node["type"]
if type_ == "Call":
ids["call_ids"].append(i)
elif type_ == "Assign":
ids["assign_ids"].append(i)
elif type_ == "Return":
ids["return_ids"].append(i)
elif type_ in {"ListComp", "ListLoad", "ListStore"}:
ids["list_ids"].append(i)
elif type_ in {"DictComp", "DictLoad", "DictStore"}:
ids["dict_ids"].append(i)
elif type_ == "Raise":
ids["raise_ids"].append(i)
return ids
def main():
parser = argparse.ArgumentParser(
description="Generate ids (leaf, values, types) from AST"
)
parser.add_argument(
"--ast_fp", "-a", help="Filepath with the new ASTs to be parsed"
)
parser.add_argument(
"--out_fp", "-o", default="/tmp/ids.txt", help="Filepath for the output ids"
)
parser.add_argument(
"--n_ctx", "-c", type=int, default=1000, help="Number of contexts for each dp"
)
parser.add_argument(
"id_type",
choices=["leaf", "value", "type", "all"],
default="leaf",
help="Which ids to generate. Default = leaf",
)
args = parser.parse_args()
if os.path.exists(args.out_fp):
os.remove(args.out_fp)
logging.info("Type of id to get: {}".format(args.id_type))
logging.info("Loading dps from: {}".format(args.ast_fp))
with open(args.ast_fp, "r") as f, open(args.out_fp, "w") as fout:
for line in file_tqdm(f):
dp = json.loads(line.strip())
asts = separate_dps(dp, args.n_ctx)
for ast, _ in asts:
ids = {}
if len(ast) > 1:
if args.id_type in {"leaf", "all"}:
ids.update(get_leaf_ids(ast))
if args.id_type in {"value", "all"}:
ids.update(get_value_ids(ast))
if args.id_type in {"type", "all"}:
ids.update(get_type_ids(ast))
json.dump(ids, fp=fout)
fout.write("\n")
logging.info("Wrote to: {}".format(args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | models/trav_trans/generate_ast_ids.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from dataset.dataset import BaseDataset, BaseSetup, BaseVocab
class Setup(BaseSetup):
def _create_vocab(self):
return Vocab(self.filepaths["vocab"])
def _create_dataset(self, fp, ids_fp):
return Dataset(fp, ids_fp)
class Vocab(BaseVocab):
def convert(self, line):
dp, ext = line
dp_conv = [
self.vocab2idx[token] if token in self.vocab2idx else self.unk_idx
for token in dp
]
return [dp_conv, ext]
class Dataset(BaseDataset):
@staticmethod
def collate(seqs, pad_idx):
max_len = max(len(seq[0][0]) for seq in seqs)
max_len = max(max_len, 2)
input_seqs = []
target_seqs = []
extended = []
ids = {name: [] for name in seqs[0][1].keys()}
for i, ((seq, ext), ids_lst) in enumerate(seqs):
padding = [pad_idx] * (max_len - len(seq))
input_seqs.append(seq[:-1] + padding)
target_seqs.append(seq[1:] + padding)
extended.append(ext)
for name, lst in ids_lst.items():
ids[name] += [j - 1 + (max_len - 1) * i for j in lst]
return {
"input_seq": torch.tensor(input_seqs),
"target_seq": torch.tensor(target_seqs),
"extended": torch.tensor(extended),
"ids": ids,
}
| code-prediction-transformer-main | models/trav_trans/dataset.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import os
from utils import file_tqdm, get_dfs, separate_dps
logging.basicConfig(level=logging.INFO)
def main():
parser = argparse.ArgumentParser(description="Generate datapoints from AST")
parser.add_argument("--ast_fp", "-a", help="Filepath with the ASTs to be parsed")
parser.add_argument(
"--out_fp", "-o", default="/tmp/dps.txt", help="Filepath for the output dps"
)
parser.add_argument(
"--n_ctx", "-c", type=int, default=1000, help="Number of contexts for each dp"
)
args = parser.parse_args()
if os.path.exists(args.out_fp):
os.remove(args.out_fp)
logging.info("Number of context: {}".format(args.n_ctx))
num_dps = 0
logging.info("Loading asts from: {}".format(args.ast_fp))
with open(args.ast_fp, "r") as f, open(args.out_fp, "w") as fout:
for line in file_tqdm(f):
dp = json.loads(line.strip())
asts = separate_dps(dp, args.n_ctx)
for ast, extended in asts:
if len(ast) > 1:
json.dump([get_dfs(ast), extended], fp=fout)
fout.write("\n")
num_dps += 1
logging.info("Wrote {} datapoints to {}".format(num_dps, args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | models/trav_trans/generate_data.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup, find_packages
NAME = 'audiocraft'
DESCRIPTION = 'Audio generation research library for PyTorch'
URL = 'https://github.com/facebookresearch/audiocraft'
AUTHOR = 'FAIR Speech & Audio'
EMAIL = '[email protected], [email protected]'
REQUIRES_PYTHON = '>=3.8.0'
for line in open('audiocraft/__init__.py'):
line = line.strip()
if '__version__' in line:
context = {}
exec(line, context)
VERSION = context['__version__']
HERE = Path(__file__).parent
try:
with open(HERE / "README.md", encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
REQUIRED = [i.strip() for i in open(HERE / 'requirements.txt') if not i.startswith('#')]
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author_email=EMAIL,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
url=URL,
python_requires=REQUIRES_PYTHON,
install_requires=REQUIRED,
extras_require={
'dev': ['coverage', 'flake8', 'mypy', 'pdoc3', 'pytest'],
},
packages=find_packages(),
package_data={'audiocraft': ['py.typed']},
include_package_data=True,
license='MIT License',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| audiocraft-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
AudioCraft is a general framework for training audio generative models.
At the moment we provide the training code for:
- [MusicGen](https://arxiv.org/abs/2306.05284), a state-of-the-art
text-to-music and melody+text autoregressive generative model.
For the solver, see `audiocraft.solvers.musicgen.MusicGenSolver`, and for the model,
`audiocraft.models.musicgen.MusicGen`.
- [AudioGen](https://arxiv.org/abs/2209.15352), a state-of-the-art
text-to-general-audio generative model.
- [EnCodec](https://arxiv.org/abs/2210.13438), efficient and high fidelity
neural audio codec which provides an excellent tokenizer for autoregressive language models.
See `audiocraft.solvers.compression.CompressionSolver`, and `audiocraft.models.encodec.EncodecModel`.
- [MultiBandDiffusion](TODO), alternative diffusion-based decoder compatible with EnCodec that
improves the perceived quality and reduces the artifacts coming from adversarial decoders.
"""
# flake8: noqa
from . import data, modules, models
__version__ = '1.0.0'
| audiocraft-main | audiocraft/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Entry point for dora to launch solvers for running training loops.
See more info on how to use dora: https://github.com/facebookresearch/dora
"""
import logging
import multiprocessing
import os
import sys
import typing as tp
from dora import git_save, hydra_main, XP
import flashy
import hydra
import omegaconf
from .environment import AudioCraftEnvironment
from .utils.cluster import get_slurm_parameters
logger = logging.getLogger(__name__)
def resolve_config_dset_paths(cfg):
"""Enable Dora to load manifest from git clone repository."""
# manifest files for the different splits
for key, value in cfg.datasource.items():
if isinstance(value, str):
cfg.datasource[key] = git_save.to_absolute_path(value)
def get_solver(cfg):
from . import solvers
# Convert batch size to batch size for each GPU
assert cfg.dataset.batch_size % flashy.distrib.world_size() == 0
cfg.dataset.batch_size //= flashy.distrib.world_size()
for split in ['train', 'valid', 'evaluate', 'generate']:
if hasattr(cfg.dataset, split) and hasattr(cfg.dataset[split], 'batch_size'):
assert cfg.dataset[split].batch_size % flashy.distrib.world_size() == 0
cfg.dataset[split].batch_size //= flashy.distrib.world_size()
resolve_config_dset_paths(cfg)
solver = solvers.get_solver(cfg)
return solver
def get_solver_from_xp(xp: XP, override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None,
restore: bool = True, load_best: bool = True,
ignore_state_keys: tp.List[str] = [], disable_fsdp: bool = True):
"""Given a XP, return the Solver object.
Args:
xp (XP): Dora experiment for which to retrieve the solver.
override_cfg (dict or None): If not None, should be a dict used to
override some values in the config of `xp`. This will not impact
the XP signature or folder. The format is different
than the one used in Dora grids, nested keys should actually be nested dicts,
not flattened, e.g. `{'optim': {'batch_size': 32}}`.
restore (bool): If `True` (the default), restore state from the last checkpoint.
load_best (bool): If `True` (the default), load the best state from the checkpoint.
ignore_state_keys (list[str]): List of sources to ignore when loading the state, e.g. `optimizer`.
disable_fsdp (bool): if True, disables FSDP entirely. This will
also automatically skip loading the EMA. For solver specific
state sources, like the optimizer, you might want to
use along `ignore_state_keys=['optimizer']`. Must be used with `load_best=True`.
"""
logger.info(f"Loading solver from XP {xp.sig}. "
f"Overrides used: {xp.argv}")
cfg = xp.cfg
if override_cfg is not None:
cfg = omegaconf.OmegaConf.merge(cfg, omegaconf.DictConfig(override_cfg))
if disable_fsdp and cfg.fsdp.use:
cfg.fsdp.use = False
assert load_best is True
# ignoring some keys that were FSDP sharded like model, ema, and best_state.
# fsdp_best_state will be used in that case. When using a specific solver,
# one is responsible for adding the relevant keys, e.g. 'optimizer'.
# We could make something to automatically register those inside the solver, but that
# seem overkill at this point.
ignore_state_keys = ignore_state_keys + ['model', 'ema', 'best_state']
try:
with xp.enter():
solver = get_solver(cfg)
if restore:
solver.restore(load_best=load_best, ignore_state_keys=ignore_state_keys)
return solver
finally:
hydra.core.global_hydra.GlobalHydra.instance().clear()
def get_solver_from_sig(sig: str, *args, **kwargs):
"""Return Solver object from Dora signature, i.e. to play with it from a notebook.
See `get_solver_from_xp` for more information.
"""
xp = main.get_xp_from_sig(sig)
return get_solver_from_xp(xp, *args, **kwargs)
def init_seed_and_system(cfg):
import numpy as np
import torch
import random
from audiocraft.modules.transformer import set_efficient_attention_backend
multiprocessing.set_start_method(cfg.mp_start_method)
logger.debug('Setting mp start method to %s', cfg.mp_start_method)
random.seed(cfg.seed)
np.random.seed(cfg.seed)
# torch also initialize cuda seed if available
torch.manual_seed(cfg.seed)
torch.set_num_threads(cfg.num_threads)
os.environ['MKL_NUM_THREADS'] = str(cfg.num_threads)
os.environ['OMP_NUM_THREADS'] = str(cfg.num_threads)
logger.debug('Setting num threads to %d', cfg.num_threads)
set_efficient_attention_backend(cfg.efficient_attention_backend)
logger.debug('Setting efficient attention backend to %s', cfg.efficient_attention_backend)
@hydra_main(config_path='../config', config_name='config', version_base='1.1')
def main(cfg):
init_seed_and_system(cfg)
# Setup logging both to XP specific folder, and to stderr.
log_name = '%s.log.{rank}' % cfg.execute_only if cfg.execute_only else 'solver.log.{rank}'
flashy.setup_logging(level=str(cfg.logging.level).upper(), log_name=log_name)
# Initialize distributed training, no need to specify anything when using Dora.
flashy.distrib.init()
solver = get_solver(cfg)
if cfg.show:
solver.show()
return
if cfg.execute_only:
assert cfg.execute_inplace or cfg.continue_from is not None, \
"Please explicitly specify the checkpoint to continue from with continue_from=<sig_or_path> " + \
"when running with execute_only or set execute_inplace to True."
solver.restore(replay_metrics=False) # load checkpoint
solver.run_one_stage(cfg.execute_only)
return
return solver.run()
main.dora.dir = AudioCraftEnvironment.get_dora_dir()
main._base_cfg.slurm = get_slurm_parameters(main._base_cfg.slurm)
if main.dora.shared is not None and not os.access(main.dora.shared, os.R_OK):
print("No read permission on dora.shared folder, ignoring it.", file=sys.stderr)
main.dora.shared = None
if __name__ == '__main__':
main()
| audiocraft-main | audiocraft/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Provides cluster and tools configuration across clusters (slurm, dora, utilities).
"""
import logging
import os
from pathlib import Path
import re
import typing as tp
import omegaconf
from .utils.cluster import _guess_cluster_type
logger = logging.getLogger(__name__)
class AudioCraftEnvironment:
"""Environment configuration for teams and clusters.
AudioCraftEnvironment picks compute cluster settings (slurm, dora) from the current running environment
or declared variable and the loaded team configuration. Additionally, the AudioCraftEnvironment
provides pointers to a reference folder resolved automatically across clusters that is shared across team members,
allowing to share sigs or other files to run jobs. Finally, it provides dataset mappers to automatically
map dataset file paths to new locations across clusters, allowing to use the same manifest of files across cluters.
The cluster type is identified automatically and base configuration file is read from config/teams.yaml.
Use the following environment variables to specify the cluster, team or configuration:
AUDIOCRAFT_CLUSTER (optional): Cluster type to enforce. Useful if the cluster type
cannot be inferred automatically.
AUDIOCRAFT_CONFIG (optional): Path to yaml config holding the teams configuration.
If not set, configuration is read from config/teams.yaml.
AUDIOCRAFT_TEAM (optional): Name of the team. Recommended to set to your own team.
Cluster configuration are shared across teams to match compute allocation,
specify your cluster configuration in the configuration file under a key mapping
your team name.
"""
_instance = None
DEFAULT_TEAM = "default"
def __init__(self) -> None:
"""Loads configuration."""
self.team: str = os.getenv("AUDIOCRAFT_TEAM", self.DEFAULT_TEAM)
cluster_type = _guess_cluster_type()
cluster = os.getenv(
"AUDIOCRAFT_CLUSTER", cluster_type.value
)
logger.info("Detecting cluster type %s", cluster_type)
self.cluster: str = cluster
config_path = os.getenv(
"AUDIOCRAFT_CONFIG",
Path(__file__)
.parent.parent.joinpath("config/teams", self.team)
.with_suffix(".yaml"),
)
self.config = omegaconf.OmegaConf.load(config_path)
self._dataset_mappers = []
cluster_config = self._get_cluster_config()
if "dataset_mappers" in cluster_config:
for pattern, repl in cluster_config["dataset_mappers"].items():
regex = re.compile(pattern)
self._dataset_mappers.append((regex, repl))
def _get_cluster_config(self) -> omegaconf.DictConfig:
assert isinstance(self.config, omegaconf.DictConfig)
return self.config[self.cluster]
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
@classmethod
def reset(cls):
"""Clears the environment and forces a reload on next invocation."""
cls._instance = None
@classmethod
def get_team(cls) -> str:
"""Gets the selected team as dictated by the AUDIOCRAFT_TEAM env var.
If not defined, defaults to "labs".
"""
return cls.instance().team
@classmethod
def get_cluster(cls) -> str:
"""Gets the detected cluster.
This value can be overridden by the AUDIOCRAFT_CLUSTER env var.
"""
return cls.instance().cluster
@classmethod
def get_dora_dir(cls) -> Path:
"""Gets the path to the dora directory for the current team and cluster.
Value is overridden by the AUDIOCRAFT_DORA_DIR env var.
"""
cluster_config = cls.instance()._get_cluster_config()
dora_dir = os.getenv("AUDIOCRAFT_DORA_DIR", cluster_config["dora_dir"])
logger.warning(f"Dora directory: {dora_dir}")
return Path(dora_dir)
@classmethod
def get_reference_dir(cls) -> Path:
"""Gets the path to the reference directory for the current team and cluster.
Value is overridden by the AUDIOCRAFT_REFERENCE_DIR env var.
"""
cluster_config = cls.instance()._get_cluster_config()
return Path(os.getenv("AUDIOCRAFT_REFERENCE_DIR", cluster_config["reference_dir"]))
@classmethod
def get_slurm_exclude(cls) -> tp.Optional[str]:
"""Get the list of nodes to exclude for that cluster."""
cluster_config = cls.instance()._get_cluster_config()
return cluster_config.get("slurm_exclude")
@classmethod
def get_slurm_partitions(cls, partition_types: tp.Optional[tp.List[str]] = None) -> str:
"""Gets the requested partitions for the current team and cluster as a comma-separated string.
Args:
partition_types (list[str], optional): partition types to retrieve. Values must be
from ['global', 'team']. If not provided, the global partition is returned.
"""
if not partition_types:
partition_types = ["global"]
cluster_config = cls.instance()._get_cluster_config()
partitions = [
cluster_config["partitions"][partition_type]
for partition_type in partition_types
]
return ",".join(partitions)
@classmethod
def resolve_reference_path(cls, path: tp.Union[str, Path]) -> Path:
"""Converts reference placeholder in path with configured reference dir to resolve paths.
Args:
path (str or Path): Path to resolve.
Returns:
Path: Resolved path.
"""
path = str(path)
if path.startswith("//reference"):
reference_dir = cls.get_reference_dir()
logger.warn(f"Reference directory: {reference_dir}")
assert (
reference_dir.exists() and reference_dir.is_dir()
), f"Reference directory does not exist: {reference_dir}."
path = re.sub("^//reference", str(reference_dir), path)
return Path(path)
@classmethod
def apply_dataset_mappers(cls, path: str) -> str:
"""Applies dataset mapping regex rules as defined in the configuration.
If no rules are defined, the path is returned as-is.
"""
instance = cls.instance()
for pattern, repl in instance._dataset_mappers:
path = pattern.sub(repl, path)
return path
| audiocraft-main | audiocraft/environment.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchmetrics
from ..data.audio_utils import convert_audio
from ..modules.chroma import ChromaExtractor
class ChromaCosineSimilarityMetric(torchmetrics.Metric):
"""Chroma cosine similarity metric.
This metric extracts a chromagram for a reference waveform and
a generated waveform and compares each frame using the cosine similarity
function. The output is the mean cosine similarity.
Args:
sample_rate (int): Sample rate used by the chroma extractor.
n_chroma (int): Number of chroma used by the chroma extractor.
radix2_exp (int): Exponent for the chroma extractor.
argmax (bool): Whether the chroma extractor uses argmax.
eps (float): Epsilon for cosine similarity computation.
"""
def __init__(self, sample_rate: int, n_chroma: int, radix2_exp: int, argmax: bool, eps: float = 1e-8):
super().__init__()
self.chroma_sample_rate = sample_rate
self.n_chroma = n_chroma
self.eps = eps
self.chroma_extractor = ChromaExtractor(sample_rate=self.chroma_sample_rate, n_chroma=self.n_chroma,
radix2_exp=radix2_exp, argmax=argmax)
self.add_state("cosine_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
self.add_state("weight", default=torch.tensor(0.), dist_reduce_fx="sum")
def update(self, preds: torch.Tensor, targets: torch.Tensor,
sizes: torch.Tensor, sample_rates: torch.Tensor) -> None:
"""Compute cosine similarity between chromagrams and accumulate scores over the dataset."""
if preds.size(0) == 0:
return
assert preds.shape == targets.shape, (
f"Preds and target shapes mismatch: preds={preds.shape}, targets={targets.shape}")
assert preds.size(0) == sizes.size(0), (
f"Number of items in preds ({preds.shape}) mismatch ",
f"with sizes ({sizes.shape})")
assert preds.size(0) == sample_rates.size(0), (
f"Number of items in preds ({preds.shape}) mismatch ",
f"with sample_rates ({sample_rates.shape})")
assert torch.all(sample_rates == sample_rates[0].item()), "All sample rates are not the same in the batch"
device = self.weight.device
preds, targets = preds.to(device), targets.to(device) # type: ignore
sample_rate = sample_rates[0].item()
preds = convert_audio(preds, from_rate=sample_rate, to_rate=self.chroma_sample_rate, to_channels=1)
targets = convert_audio(targets, from_rate=sample_rate, to_rate=self.chroma_sample_rate, to_channels=1)
gt_chroma = self.chroma_extractor(targets)
gen_chroma = self.chroma_extractor(preds)
chroma_lens = (sizes / self.chroma_extractor.winhop).ceil().int()
for i in range(len(gt_chroma)):
t = int(chroma_lens[i].item())
cosine_sim = torch.nn.functional.cosine_similarity(
gt_chroma[i, :t], gen_chroma[i, :t], dim=1, eps=self.eps)
self.cosine_sum += cosine_sim.sum(dim=0) # type: ignore
self.weight += torch.tensor(t) # type: ignore
def compute(self) -> float:
"""Computes the average cosine similarty across all generated/target chromagrams pairs."""
assert self.weight.item() > 0, "Unable to compute with total number of comparisons <= 0" # type: ignore
return (self.cosine_sum / self.weight).item() # type: ignore
| audiocraft-main | audiocraft/metrics/chroma_cosinesim.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import typing as tp
import torch
import torchmetrics
from transformers import RobertaTokenizer # type: ignore
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..utils.utils import load_clap_state_dict
try:
import laion_clap # type: ignore
except ImportError:
laion_clap = None
class TextConsistencyMetric(torchmetrics.Metric):
"""Text consistency metric measuring consistency between audio and text pairs."""
def update(self, audio: torch.Tensor, text: tp.List[str], sizes: torch.Tensor, sample_rates: torch.Tensor) -> None:
raise NotImplementedError("implement how to update the metric from the audio and text pairs.")
def compute(self):
raise NotImplementedError("implement how to compute the final metric score.")
class CLAPTextConsistencyMetric(TextConsistencyMetric):
"""Text consistency metric relying on Contrastive Language-Audio Pretraining (CLAP).
This metric is similar to the MuLan Cycle Consistency from MusicLM (https://arxiv.org/pdf/2301.11325.pdf)
or the CLAP score used in Make-An-Audio (https://arxiv.org/pdf/2301.12661v1.pdf).
As a joint audio-text embedding model, a pretrained CLAP model can be used to quantify the
similarity between audio-text pairs. We compute the CLAP embeddings from the text descriptions as
well as the generated audio based on them, and define the MCC metric as the average cosine similarity
between these embeddings.
Model implementation & pre-trained checkpoints: https://github.com/LAION-AI/CLAP
"""
def __init__(self, model_path: tp.Union[str, Path], model_arch: str = 'HTSAT-tiny', enable_fusion: bool = False):
super().__init__()
if laion_clap is None:
raise ImportError("Please install CLAP to compute text consistency: 'pip install laion_clap'")
self.add_state("cosine_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
self.add_state("weight", default=torch.tensor(0.), dist_reduce_fx="sum")
self._initialize_model(model_path, model_arch, enable_fusion)
def _initialize_model(self, model_path: tp.Union[str, Path], model_arch: str, enable_fusion: bool):
model_path = AudioCraftEnvironment.resolve_reference_path(model_path)
self.tokenize = RobertaTokenizer.from_pretrained('roberta-base')
self.model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch)
self.model_sample_rate = 48_000
load_clap_state_dict(self.model, model_path)
self.model.eval()
def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict:
# we use the default params from CLAP module here as well
return self.tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt")
def update(self, audio: torch.Tensor, text: tp.List[str], sizes: torch.Tensor, sample_rates: torch.Tensor) -> None:
"""Compute cosine similarity between audio and text pairs and accumulate scores over the dataset."""
assert audio.size(0) == len(text), "Number of audio and text samples should match"
assert torch.all(sample_rates == sample_rates[0].item()), "All items in batch should have the same sample rate"
sample_rate = int(sample_rates[0].item())
# convert audio batch to 48kHz monophonic audio with no channel dimension: [B, C, T] -> [B, T]
audio = convert_audio(audio, from_rate=sample_rate, to_rate=self.model_sample_rate, to_channels=1).mean(dim=1)
audio_embeddings = self.model.get_audio_embedding_from_data(audio, use_tensor=True)
text_embeddings = self.model.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True)
# cosine similarity between the text and the audio embedding
cosine_sim = torch.nn.functional.cosine_similarity(audio_embeddings, text_embeddings, dim=1, eps=1e-8)
self.cosine_sum += cosine_sim.sum(dim=0)
self.weight += torch.tensor(cosine_sim.size(0))
def compute(self):
"""Computes the average cosine similarty across all audio/text pairs."""
assert self.weight.item() > 0, "Unable to compute with total number of comparisons <= 0" # type: ignore
return (self.cosine_sum / self.weight).item() # type: ignore
| audiocraft-main | audiocraft/metrics/clap_consistency.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Metrics like CLAP score, FAD, KLD, Visqol, Chroma similarity, etc.
"""
# flake8: noqa
from .clap_consistency import CLAPTextConsistencyMetric, TextConsistencyMetric
from .chroma_cosinesim import ChromaCosineSimilarityMetric
from .fad import FrechetAudioDistanceMetric
from .kld import KLDivergenceMetric, PasstKLDivergenceMetric
from .rvm import RelativeVolumeMel
from .visqol import ViSQOL
| audiocraft-main | audiocraft/metrics/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
import os
import subprocess
import tempfile
import typing as tp
from audiocraft.data.audio import audio_write
from audiocraft.data.audio_utils import convert_audio
import flashy
import torch
import torchmetrics
from ..environment import AudioCraftEnvironment
logger = logging.getLogger(__name__)
VGGISH_SAMPLE_RATE = 16_000
VGGISH_CHANNELS = 1
class FrechetAudioDistanceMetric(torchmetrics.Metric):
"""Fréchet Audio Distance computation based on official TensorFlow implementation from Google Research.
From: D.C. Dowson & B.V. Landau The Fréchet distance between
multivariate normal distributions
https://doi.org/10.1016/0047-259X(82)90077-X
The Fréchet distance between two multivariate gaussians,
`X ~ N(mu_x, sigma_x)` and `Y ~ N(mu_y, sigma_y)`, is `d^2`.
d^2 = (mu_x - mu_y)^2 + Tr(sigma_x + sigma_y - 2 * sqrt(sigma_x*sigma_y))
= (mu_x - mu_y)^2 + Tr(sigma_x) + Tr(sigma_y)
- 2 * Tr(sqrt(sigma_x*sigma_y)))
To use this FAD computation metric, you need to have the proper Frechet Audio Distance tool setup
from: https://github.com/google-research/google-research/tree/master/frechet_audio_distance
We provide the below instructions as reference but we do not guarantee for further support
in frechet_audio_distance installation. This was tested with python 3.10, cuda 11.8, tensorflow 2.12.0.
We recommend installing the frechet_audio_distance library in a dedicated env (e.g. conda).
1. Get the code and models following the repository instructions. We used the steps below:
git clone [email protected]:google-research/google-research.git
git clone [email protected]:tensorflow/models.git
mkdir google-research/tensorflow_models
touch google-research/tensorflow_models/__init__.py
cp -r models/research/audioset google-research/tensorflow_models/
touch google-research/tensorflow_models/audioset/__init__.py
echo "from .vggish import mel_features, vggish_params, vggish_slim" > \
google-research/tensorflow_models/audioset/__init__.py
# we can now remove the tensorflow models repository
# rm -r models
cd google-research
Follow the instructions to download the vggish checkpoint. AudioCraft base configuration
assumes it is placed in the AudioCraft reference dir.
Note that we operate the following changes for the code to work with TensorFlow 2.X and python 3:
- Update xrange for range in:
https://github.com/google-research/google-research/blob/master/frechet_audio_distance/audioset_model.py
- Update `tf_record = tf.python_io.tf_record_iterator(filename).next()` to
`tf_record = tf.python_io.tf_record_iterator(filename).__next__()` in
https://github.com/google-research/google-research/blob/master/frechet_audio_distance/fad_utils.py
- Update `import vggish_params as params` to `from . import vggish_params as params` in:
https://github.com/tensorflow/models/blob/master/research/audioset/vggish/vggish_slim.py
- Add flag to provide a given batch size for running the AudioSet model in:
https://github.com/google-research/google-research/blob/master/frechet_audio_distance/create_embeddings_main.py
```
flags.DEFINE_integer('batch_size', 64,
'Number of samples in the batch for AudioSet model.')
```
Ensure you pass the flag to the create_embeddings_beam.create_pipeline function, adding:
`batch_size=FLAGS.batch_size` to the provided parameters.
2. Follow instructions for the library installation and a valid TensorFlow installation
```
# e.g. instructions from: https://www.tensorflow.org/install/pip
conda install -c conda-forge cudatoolkit=11.8.0
python3 -m pip install nvidia-cudnn-cu11==8.6.0.163 tensorflow==2.12.*
mkdir -p $CONDA_PREFIX/etc/conda/activate.d
echo 'CUDNN_PATH=$(dirname $(python -c "import nvidia.cudnn;print(nvidia.cudnn.__file__)"))' \
>> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/:$CUDNN_PATH/lib' \
>> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
source $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
# Verify install: on a machine with GPU device
python3 -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"
```
Now install frechet_audio_distance required dependencies:
```
# We assume we already have TensorFlow installed from the above steps
pip install apache-beam numpy scipy tf_slim
```
Finally, follow remaining library instructions to ensure you have a working frechet_audio_distance setup
(you may want to specify --model_ckpt flag pointing to the model's path).
3. AudioCraft's FrechetAudioDistanceMetric requires 2 environment variables pointing to the python executable
and Tensorflow library path from the above installation steps:
export TF_PYTHON_EXE="<PATH_TO_THE_ENV_PYTHON_BINARY>"
export TF_LIBRARY_PATH="<PATH_TO_THE_ENV_CUDNN_LIBRARY>"
e.g. assuming we have installed everything in a dedicated conda env
with python 3.10 that is currently active:
export TF_PYTHON_EXE="$CONDA_PREFIX/bin/python"
export TF_LIBRARY_PATH="$CONDA_PREFIX/lib/python3.10/site-packages/nvidia/cudnn/lib"
Finally you may want to export the following variable:
export TF_FORCE_GPU_ALLOW_GROWTH=true
See: https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
You can save those environment variables in your training conda env, when currently active:
`$CONDA_PREFIX/etc/conda/activate.d/env_vars.sh`
e.g. assuming the env with TensorFlow and frechet_audio_distance install is named ac_eval,
and the training conda env is named audiocraft:
```
# activate training env
conda activate audiocraft
# get path to all envs
CONDA_ENV_DIR=$(dirname $CONDA_PREFIX)
# export pointers to evaluation env for using TensorFlow in FrechetAudioDistanceMetric
touch $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
echo 'export TF_PYTHON_EXE="$CONDA_ENV_DIR/ac_eval/bin/python"' >> \
$CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
echo 'export TF_LIBRARY_PATH="$CONDA_ENV_DIR/ac_eval/lib/python3.10/site-packages/nvidia/cudnn/lib"' >> \
$CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
# optionally:
echo 'export TF_FORCE_GPU_ALLOW_GROWTH=true' >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
# you may need to reactivate the audiocraft env for this to take effect
```
Args:
bin (Path or str): Path to installed frechet audio distance code.
model_path (Path or str): Path to Tensorflow checkpoint for the model
used to compute statistics over the embedding beams.
format (str): Audio format used to save files.
log_folder (Path or str, optional): Path where to write process logs.
"""
def __init__(self, bin: tp.Union[Path, str], model_path: tp.Union[Path, str],
format: str = "wav", batch_size: tp.Optional[int] = None,
log_folder: tp.Optional[tp.Union[Path, str]] = None):
super().__init__()
self.model_sample_rate = VGGISH_SAMPLE_RATE
self.model_channels = VGGISH_CHANNELS
self.model_path = AudioCraftEnvironment.resolve_reference_path(model_path)
assert Path(self.model_path).exists(), f"Could not find provided model checkpoint path at: {self.model_path}"
self.format = format
self.batch_size = batch_size
self.bin = bin
self.tf_env = {"PYTHONPATH": str(self.bin)}
self.python_path = os.environ.get('TF_PYTHON_EXE') or 'python'
logger.info("Python exe for TF is %s", self.python_path)
if 'TF_LIBRARY_PATH' in os.environ:
self.tf_env['LD_LIBRARY_PATH'] = os.environ['TF_LIBRARY_PATH']
if 'TF_FORCE_GPU_ALLOW_GROWTH' in os.environ:
self.tf_env['TF_FORCE_GPU_ALLOW_GROWTH'] = os.environ['TF_FORCE_GPU_ALLOW_GROWTH']
logger.info("Env for TF is %r", self.tf_env)
self.reset(log_folder)
self.add_state("total_files", default=torch.tensor(0.), dist_reduce_fx="sum")
def reset(self, log_folder: tp.Optional[tp.Union[Path, str]] = None):
"""Reset torchmetrics.Metrics state."""
log_folder = Path(log_folder or tempfile.mkdtemp())
self.tmp_dir = log_folder / 'fad'
self.tmp_dir.mkdir(exist_ok=True)
self.samples_tests_dir = self.tmp_dir / 'tests'
self.samples_tests_dir.mkdir(exist_ok=True)
self.samples_background_dir = self.tmp_dir / 'background'
self.samples_background_dir.mkdir(exist_ok=True)
self.manifest_tests = self.tmp_dir / 'files_tests.cvs'
self.manifest_background = self.tmp_dir / 'files_background.cvs'
self.stats_tests_dir = self.tmp_dir / 'stats_tests'
self.stats_background_dir = self.tmp_dir / 'stats_background'
self.counter = 0
def update(self, preds: torch.Tensor, targets: torch.Tensor,
sizes: torch.Tensor, sample_rates: torch.Tensor,
stems: tp.Optional[tp.List[str]] = None):
"""Update torchmetrics.Metrics by saving the audio and updating the manifest file."""
assert preds.shape == targets.shape, f"preds={preds.shape} != targets={targets.shape}"
num_samples = preds.shape[0]
assert num_samples == sizes.size(0) and num_samples == sample_rates.size(0)
assert stems is None or num_samples == len(set(stems))
for i in range(num_samples):
self.total_files += 1 # type: ignore
self.counter += 1
wav_len = int(sizes[i].item())
sample_rate = int(sample_rates[i].item())
pred_wav = preds[i]
target_wav = targets[i]
pred_wav = pred_wav[..., :wav_len]
target_wav = target_wav[..., :wav_len]
stem_name = stems[i] if stems is not None else f'sample_{self.counter}_{flashy.distrib.rank()}'
# dump audio files
try:
pred_wav = convert_audio(
pred_wav.unsqueeze(0), from_rate=sample_rate,
to_rate=self.model_sample_rate, to_channels=1).squeeze(0)
audio_write(
self.samples_tests_dir / stem_name, pred_wav, sample_rate=self.model_sample_rate,
format=self.format, strategy="peak")
except Exception as e:
logger.error(f"Exception occured when saving tests files for FAD computation: {repr(e)} - {e}")
try:
# for the ground truth audio, we enforce the 'peak' strategy to avoid modifying
# the original audio when writing it
target_wav = convert_audio(
target_wav.unsqueeze(0), from_rate=sample_rate,
to_rate=self.model_sample_rate, to_channels=1).squeeze(0)
audio_write(
self.samples_background_dir / stem_name, target_wav, sample_rate=self.model_sample_rate,
format=self.format, strategy="peak")
except Exception as e:
logger.error(f"Exception occured when saving background files for FAD computation: {repr(e)} - {e}")
def _get_samples_name(self, is_background: bool):
return 'background' if is_background else 'tests'
def _create_embedding_beams(self, is_background: bool, gpu_index: tp.Optional[int] = None):
if is_background:
input_samples_dir = self.samples_background_dir
input_filename = self.manifest_background
stats_name = self.stats_background_dir
else:
input_samples_dir = self.samples_tests_dir
input_filename = self.manifest_tests
stats_name = self.stats_tests_dir
beams_name = self._get_samples_name(is_background)
log_file = self.tmp_dir / f'fad_logs_create_beams_{beams_name}.log'
logger.info(f"Scanning samples folder to fetch list of files: {input_samples_dir}")
with open(input_filename, "w") as fout:
for path in Path(input_samples_dir).glob(f"*.{self.format}"):
fout.write(f"{str(path)}\n")
cmd = [
self.python_path, "-m",
"frechet_audio_distance.create_embeddings_main",
"--model_ckpt", f"{self.model_path}",
"--input_files", f"{str(input_filename)}",
"--stats", f"{str(stats_name)}",
]
if self.batch_size is not None:
cmd += ["--batch_size", str(self.batch_size)]
logger.info(f"Launching frechet_audio_distance embeddings main method: {' '.join(cmd)} on {beams_name}")
env = os.environ
if gpu_index is not None:
env["CUDA_VISIBLE_DEVICES"] = str(gpu_index)
process = subprocess.Popen(
cmd, stdout=open(log_file, "w"), env={**env, **self.tf_env}, stderr=subprocess.STDOUT)
return process, log_file
def _compute_fad_score(self, gpu_index: tp.Optional[int] = None):
cmd = [
self.python_path, "-m", "frechet_audio_distance.compute_fad",
"--test_stats", f"{str(self.stats_tests_dir)}",
"--background_stats", f"{str(self.stats_background_dir)}",
]
logger.info(f"Launching frechet_audio_distance compute fad method: {' '.join(cmd)}")
env = os.environ
if gpu_index is not None:
env["CUDA_VISIBLE_DEVICES"] = str(gpu_index)
result = subprocess.run(cmd, env={**env, **self.tf_env}, capture_output=True)
if result.returncode:
logger.error(
"Error with FAD computation from stats: \n %s \n %s",
result.stdout.decode(), result.stderr.decode()
)
raise RuntimeError("Error while executing FAD computation from stats")
try:
# result is "FAD: (d+).(d+)" hence we remove the prefix with (d+) being one digit or more
fad_score = float(result.stdout[4:])
return fad_score
except Exception as e:
raise RuntimeError(f"Error parsing FAD score from command stdout: {e}")
def _log_process_result(self, returncode: int, log_file: tp.Union[Path, str], is_background: bool) -> None:
beams_name = self._get_samples_name(is_background)
if returncode:
with open(log_file, "r") as f:
error_log = f.read()
logger.error(error_log)
os._exit(1)
else:
logger.info(f"Successfully computed embedding beams on {beams_name} samples.")
def _parallel_create_embedding_beams(self, num_of_gpus: int):
assert num_of_gpus > 0
logger.info("Creating embeddings beams in a parallel manner on different GPUs")
tests_beams_process, tests_beams_log_file = self._create_embedding_beams(is_background=False, gpu_index=0)
bg_beams_process, bg_beams_log_file = self._create_embedding_beams(is_background=True, gpu_index=1)
tests_beams_code = tests_beams_process.wait()
bg_beams_code = bg_beams_process.wait()
self._log_process_result(tests_beams_code, tests_beams_log_file, is_background=False)
self._log_process_result(bg_beams_code, bg_beams_log_file, is_background=True)
def _sequential_create_embedding_beams(self):
logger.info("Creating embeddings beams in a sequential manner")
tests_beams_process, tests_beams_log_file = self._create_embedding_beams(is_background=False)
tests_beams_code = tests_beams_process.wait()
self._log_process_result(tests_beams_code, tests_beams_log_file, is_background=False)
bg_beams_process, bg_beams_log_file = self._create_embedding_beams(is_background=True)
bg_beams_code = bg_beams_process.wait()
self._log_process_result(bg_beams_code, bg_beams_log_file, is_background=True)
@flashy.distrib.rank_zero_only
def _local_compute_frechet_audio_distance(self):
"""Compute Frechet Audio Distance score calling TensorFlow API."""
num_of_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
if num_of_gpus > 1:
self._parallel_create_embedding_beams(num_of_gpus)
else:
self._sequential_create_embedding_beams()
fad_score = self._compute_fad_score(gpu_index=0)
return fad_score
def compute(self) -> float:
"""Compute metrics."""
assert self.total_files.item() > 0, "No files dumped for FAD computation!" # type: ignore
fad_score = self._local_compute_frechet_audio_distance()
logger.warning(f"FAD score = {fad_score}")
fad_score = flashy.distrib.broadcast_object(fad_score, src=0)
return fad_score
| audiocraft-main | audiocraft/metrics/fad.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from functools import partial
import logging
import os
import typing as tp
import torch
import torchmetrics
from ..data.audio_utils import convert_audio
logger = logging.getLogger(__name__)
class _patch_passt_stft:
"""Decorator to patch torch.stft in PaSST."""
def __init__(self):
self.old_stft = torch.stft
def __enter__(self):
# return_complex is a mandatory parameter in latest torch versions
# torch is throwing RuntimeErrors when not set
torch.stft = partial(torch.stft, return_complex=False)
def __exit__(self, *exc):
torch.stft = self.old_stft
def kl_divergence(pred_probs: torch.Tensor, target_probs: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
"""Computes the elementwise KL-Divergence loss between probability distributions
from generated samples and target samples.
Args:
pred_probs (torch.Tensor): Probabilities for each label obtained
from a classifier on generated audio. Expected shape is [B, num_classes].
target_probs (torch.Tensor): Probabilities for each label obtained
from a classifier on target audio. Expected shape is [B, num_classes].
epsilon (float): Epsilon value.
Returns:
kld (torch.Tensor): KLD loss between each generated sample and target pair.
"""
kl_div = torch.nn.functional.kl_div((pred_probs + epsilon).log(), target_probs, reduction="none")
return kl_div.sum(-1)
class KLDivergenceMetric(torchmetrics.Metric):
"""Base implementation for KL Divergence metric.
The KL divergence is measured between probability distributions
of class predictions returned by a pre-trained audio classification model.
When the KL-divergence is low, the generated audio is expected to
have similar acoustic characteristics as the reference audio,
according to the classifier.
"""
def __init__(self):
super().__init__()
self.add_state("kld_pq_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
self.add_state("kld_qp_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
self.add_state("kld_all_sum", default=torch.tensor(0.), dist_reduce_fx="sum")
self.add_state("weight", default=torch.tensor(0), dist_reduce_fx="sum")
def _get_label_distribution(self, x: torch.Tensor, sizes: torch.Tensor,
sample_rates: torch.Tensor) -> tp.Optional[torch.Tensor]:
"""Get model output given provided input tensor.
Args:
x (torch.Tensor): Input audio tensor of shape [B, C, T].
sizes (torch.Tensor): Actual audio sample length, of shape [B].
sample_rates (torch.Tensor): Actual audio sample rate, of shape [B].
Returns:
probs (torch.Tensor): Probabilities over labels, of shape [B, num_classes].
"""
raise NotImplementedError("implement method to extract label distributions from the model.")
def update(self, preds: torch.Tensor, targets: torch.Tensor,
sizes: torch.Tensor, sample_rates: torch.Tensor) -> None:
"""Calculates running KL-Divergence loss between batches of audio
preds (generated) and target (ground-truth)
Args:
preds (torch.Tensor): Audio samples to evaluate, of shape [B, C, T].
targets (torch.Tensor): Target samples to compare against, of shape [B, C, T].
sizes (torch.Tensor): Actual audio sample length, of shape [B].
sample_rates (torch.Tensor): Actual audio sample rate, of shape [B].
"""
assert preds.shape == targets.shape
assert preds.size(0) > 0, "Cannot update the loss with empty tensors"
preds_probs = self._get_label_distribution(preds, sizes, sample_rates)
targets_probs = self._get_label_distribution(targets, sizes, sample_rates)
if preds_probs is not None and targets_probs is not None:
assert preds_probs.shape == targets_probs.shape
kld_scores = kl_divergence(preds_probs, targets_probs)
assert not torch.isnan(kld_scores).any(), "kld_scores contains NaN value(s)!"
self.kld_pq_sum += torch.sum(kld_scores)
kld_qp_scores = kl_divergence(targets_probs, preds_probs)
self.kld_qp_sum += torch.sum(kld_qp_scores)
self.weight += torch.tensor(kld_scores.size(0))
def compute(self) -> dict:
"""Computes KL-Divergence across all evaluated pred/target pairs."""
weight: float = float(self.weight.item()) # type: ignore
assert weight > 0, "Unable to compute with total number of comparisons <= 0"
logger.info(f"Computing KL divergence on a total of {weight} samples")
kld_pq = self.kld_pq_sum.item() / weight # type: ignore
kld_qp = self.kld_qp_sum.item() / weight # type: ignore
kld_both = kld_pq + kld_qp
return {'kld': kld_pq, 'kld_pq': kld_pq, 'kld_qp': kld_qp, 'kld_both': kld_both}
class PasstKLDivergenceMetric(KLDivergenceMetric):
"""KL-Divergence metric based on pre-trained PASST classifier on AudioSet.
From: PaSST: Efficient Training of Audio Transformers with Patchout
Paper: https://arxiv.org/abs/2110.05069
Implementation: https://github.com/kkoutini/PaSST
Follow instructions from the github repo:
```
pip install 'git+https://github.com/kkoutini/[email protected]#egg=hear21passt'
```
Args:
pretrained_length (float, optional): Audio duration used for the pretrained model.
"""
def __init__(self, pretrained_length: tp.Optional[float] = None):
super().__init__()
self._initialize_model(pretrained_length)
def _initialize_model(self, pretrained_length: tp.Optional[float] = None):
"""Initialize underlying PaSST audio classifier."""
model, sr, max_frames, min_frames = self._load_base_model(pretrained_length)
self.min_input_frames = min_frames
self.max_input_frames = max_frames
self.model_sample_rate = sr
self.model = model
self.model.eval()
self.model.to(self.device)
def _load_base_model(self, pretrained_length: tp.Optional[float]):
"""Load pretrained model from PaSST."""
try:
if pretrained_length == 30:
from hear21passt.base30sec import get_basic_model # type: ignore
max_duration = 30
elif pretrained_length == 20:
from hear21passt.base20sec import get_basic_model # type: ignore
max_duration = 20
else:
from hear21passt.base import get_basic_model # type: ignore
# Original PASST was trained on AudioSet with 10s-long audio samples
max_duration = 10
min_duration = 0.15
min_duration = 0.15
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install hear21passt to compute KL divergence: ",
"pip install 'git+https://github.com/kkoutini/[email protected]#egg=hear21passt'"
)
model_sample_rate = 32_000
max_input_frames = int(max_duration * model_sample_rate)
min_input_frames = int(min_duration * model_sample_rate)
with open(os.devnull, 'w') as f, contextlib.redirect_stdout(f):
model = get_basic_model(mode='logits')
return model, model_sample_rate, max_input_frames, min_input_frames
def _process_audio(self, wav: torch.Tensor, sample_rate: int, wav_len: int) -> tp.List[torch.Tensor]:
"""Process audio to feed to the pretrained model."""
wav = wav.unsqueeze(0)
wav = wav[..., :wav_len]
wav = convert_audio(wav, from_rate=sample_rate, to_rate=self.model_sample_rate, to_channels=1)
wav = wav.squeeze(0)
# we don't pad but return a list of audio segments as this otherwise affects the KLD computation
segments = torch.split(wav, self.max_input_frames, dim=-1)
valid_segments = []
for s in segments:
# ignoring too small segments that are breaking the model inference
if s.size(-1) > self.min_input_frames:
valid_segments.append(s)
return [s[None] for s in valid_segments]
def _get_model_preds(self, wav: torch.Tensor) -> torch.Tensor:
"""Run the pretrained model and get the predictions."""
assert wav.dim() == 3, f"Unexpected number of dims for preprocessed wav: {wav.shape}"
wav = wav.mean(dim=1)
# PaSST is printing a lot of garbage that we are not interested in
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
with torch.no_grad(), _patch_passt_stft():
logits = self.model(wav.to(self.device))
probs = torch.softmax(logits, dim=-1)
return probs
def _get_label_distribution(self, x: torch.Tensor, sizes: torch.Tensor,
sample_rates: torch.Tensor) -> tp.Optional[torch.Tensor]:
"""Get model output given provided input tensor.
Args:
x (torch.Tensor): Input audio tensor of shape [B, C, T].
sizes (torch.Tensor): Actual audio sample length, of shape [B].
sample_rates (torch.Tensor): Actual audio sample rate, of shape [B].
Returns:
probs (torch.Tensor, optional): Probabilities over labels, of shape [B, num_classes].
"""
all_probs: tp.List[torch.Tensor] = []
for i, wav in enumerate(x):
sample_rate = int(sample_rates[i].item())
wav_len = int(sizes[i].item())
wav_segments = self._process_audio(wav, sample_rate, wav_len)
for segment in wav_segments:
probs = self._get_model_preds(segment).mean(dim=0)
all_probs.append(probs)
if len(all_probs) > 0:
return torch.stack(all_probs, dim=0)
else:
return None
| audiocraft-main | audiocraft/metrics/kld.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import csv
import json
import logging
from pathlib import Path
import tempfile
import typing as tp
import subprocess
import shutil
import torch
import torchaudio
logger = logging.getLogger(__name__)
class ViSQOL:
"""ViSQOL wrapper to run ViSQOL from Python using a pre-installed binary.
To learn more about ViSQOL and how to build ViSQOL binary using bazel, please refer to the
instructions available in the open source repository: https://github.com/google/visqol
ViSQOL is capable of running in two modes:
Audio Mode:
When running in audio mode, input signals must have a 48kHz sample rate. Input should be resampled to 48kHz.
Input signals can be multi-channel, but they will be down-mixed to mono for performing the comparison.
Audio mode uses support vector regression, with the maximum range at ~4.75.
Speech Mode:
When running in speech mode, ViSQOL uses a wideband model. It therefore expects input sample rates of 16kHz.
Input should be resampled to 16kHz.
As part of the speech mode processing, a root mean square implementation for voice activity detection
is performed on the reference signal to determine what parts of the signal have voice activity and
should therefore be included in the comparison. The signal is normalized before performing the voice
activity detection.
Input signals can be multi-channel, but they will be down-mixed to mono for performing the comparison.
Speech mode is scaled to have a maximum MOS of 5.0 to match previous version behavior.
For more details, check the guidelines: https://github.com/google/visqol#general-guidelines-for-input
Args:
visqol_bin (str): Path to the ViSQOL binary.
mode (str): ViSQOL computation mode, expecting "audio" or "speech".
model (str): Name of the model to use for similarity to quality model.
debug (bool): Whether to also get debug metrics from ViSQOL or not.
"""
SAMPLE_RATES_MODES = {"audio": 48_000, "speech": 16_000}
ALLOWED_SAMPLE_RATES = frozenset(SAMPLE_RATES_MODES.values())
def __init__(self, bin: tp.Union[Path, str], mode: str = "audio",
model: str = "libsvm_nu_svr_model.txt", debug: bool = False):
assert bin is not None and Path(bin).exists(), f"Could not find ViSQOL binary in specified path: {bin}"
self.visqol_bin = str(bin)
self.visqol_mode = mode
self.target_sr = self._get_target_sr(self.visqol_mode)
self.model = model
self.debug = debug
assert Path(self.visqol_model).exists(), \
f"Could not find the specified model in ViSQOL install: {self.visqol_model}"
def _get_target_sr(self, mode: str) -> int:
# returns target sampling rate for the corresponding ViSQOL mode.
if mode not in ViSQOL.SAMPLE_RATES_MODES:
raise ValueError(
f"Unsupported mode! Allowed are: {', '.join(ViSQOL.SAMPLE_RATES_MODES.keys())}"
)
return ViSQOL.SAMPLE_RATES_MODES[mode]
def _prepare_files(
self, ref_sig: torch.Tensor, deg_sig: torch.Tensor, sr: int, target_sr: int, pad_with_silence: bool = False
):
# prepare files for ViSQOL evaluation.
assert target_sr in ViSQOL.ALLOWED_SAMPLE_RATES
assert len(ref_sig) == len(deg_sig), (
"Expects same number of ref and degraded inputs",
f" but ref len {len(ref_sig)} != deg len {len(deg_sig)}"
)
# resample audio if needed
if sr != target_sr:
transform = torchaudio.transforms.Resample(sr, target_sr)
pad = int(0.5 * target_sr)
rs_ref = []
rs_deg = []
for i in range(len(ref_sig)):
rs_ref_i = transform(ref_sig[i])
rs_deg_i = transform(deg_sig[i])
if pad_with_silence:
rs_ref_i = torch.nn.functional.pad(rs_ref_i, (pad, pad), mode='constant', value=0)
rs_deg_i = torch.nn.functional.pad(rs_deg_i, (pad, pad), mode='constant', value=0)
rs_ref.append(rs_ref_i)
rs_deg.append(rs_deg_i)
ref_sig = torch.stack(rs_ref)
deg_sig = torch.stack(rs_deg)
# save audio chunks to tmp dir and create csv
tmp_dir = Path(tempfile.mkdtemp())
try:
tmp_input_csv_path = tmp_dir / "input.csv"
tmp_results_csv_path = tmp_dir / "results.csv"
tmp_debug_json_path = tmp_dir / "debug.json"
with open(tmp_input_csv_path, "w") as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(["reference", "degraded"])
for i in range(len(ref_sig)):
tmp_ref_filename = tmp_dir / f"ref_{i}.wav"
tmp_deg_filename = tmp_dir / f"deg_{i}.wav"
torchaudio.save(
tmp_ref_filename,
torch.clamp(ref_sig[i], min=-0.99, max=0.99),
sample_rate=target_sr,
bits_per_sample=16,
encoding="PCM_S"
)
torchaudio.save(
tmp_deg_filename,
torch.clamp(deg_sig[i], min=-0.99, max=0.99),
sample_rate=target_sr,
bits_per_sample=16,
encoding="PCM_S"
)
csv_writer.writerow([str(tmp_ref_filename), str(tmp_deg_filename)])
return tmp_dir, tmp_input_csv_path, tmp_results_csv_path, tmp_debug_json_path
except Exception as e:
logger.error("Exception occurred when preparing files for ViSQOL: %s", e)
return tmp_dir, None, None, None
def _flush_files(self, tmp_dir: tp.Union[Path, str]):
# flush tmp files used to compute ViSQOL.
shutil.rmtree(str(tmp_dir))
def _collect_moslqo_score(self, results_csv_path: tp.Union[Path, str]) -> float:
# collect results for each evaluated pair and return averaged moslqo score.
with open(results_csv_path, "r") as csv_file:
reader = csv.DictReader(csv_file)
moslqo_scores = [float(row["moslqo"]) for row in reader]
if len(moslqo_scores) > 0:
return sum(moslqo_scores) / len(moslqo_scores)
else:
return 0.0
def _collect_debug_data(self, debug_json_path: tp.Union[Path, str]) -> dict:
# collect debug data for the visqol inference.
with open(debug_json_path, "r") as f:
data = json.load(f)
return data
@property
def visqol_model(self):
return f'{self.visqol_bin}/model/{self.model}'
def _run_visqol(
self,
input_csv_path: tp.Union[Path, str],
results_csv_path: tp.Union[Path, str],
debug_csv_path: tp.Optional[tp.Union[Path, str]],
):
input_csv_path = str(input_csv_path)
results_csv_path = str(results_csv_path)
debug_csv_path = str(debug_csv_path)
cmd = [
f'{self.visqol_bin}/bazel-bin/visqol',
'--batch_input_csv', f'{input_csv_path}',
'--results_csv', f'{results_csv_path}'
]
if debug_csv_path is not None:
cmd += ['--output_debug', f'{debug_csv_path}']
if self.visqol_mode == "speech":
cmd += ['--use_speech_mode']
cmd += ['--similarity_to_quality_model', f'{self.visqol_model}']
result = subprocess.run(cmd, capture_output=True)
if result.returncode:
logger.error("Error with visqol: \n %s \n %s", result.stdout.decode(), result.stderr.decode())
raise RuntimeError("Error while executing visqol")
result.check_returncode()
def __call__(
self,
ref_sig: torch.Tensor,
deg_sig: torch.Tensor,
sr: int,
pad_with_silence: bool = False,
):
"""Calculate the ViSQOL metric for a pair of audio signals at a given sample rate.
Args:
ref_sig (torch.Tensor): Reference signals as [B, C, T].
deg_sig (torch.Tensor): Degraded signals as [B, C, T].
sr (int): Sample rate of the two audio signals.
pad_with_silence (bool): Whether to pad the file with silences as recommended
in visqol guidelines (see: https://github.com/google/visqol#general-guidelines-for-input).
Returns:
float: The ViSQOL score or mean score for the batch.
"""
logger.debug(f"Calculating visqol with mode={self.visqol_mode} on {len(ref_sig)} samples")
tmp_dir, input_csv, results_csv, debug_json = self._prepare_files(
ref_sig, deg_sig, sr, self.target_sr, pad_with_silence
)
try:
if input_csv and results_csv:
self._run_visqol(
input_csv,
results_csv,
debug_json if self.debug else None,
)
mosqol = self._collect_moslqo_score(results_csv)
return mosqol
else:
raise RuntimeError("Something unexpected happened when running VISQOL!")
except Exception as e:
logger.error("Exception occurred when running ViSQOL: %s", e)
finally:
self._flush_files(tmp_dir)
| audiocraft-main | audiocraft/metrics/visqol.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torch
from torch import nn
import torchaudio
def db_to_scale(volume: tp.Union[float, torch.Tensor]):
return 10 ** (volume / 20)
def scale_to_db(scale: torch.Tensor, min_volume: float = -120):
min_scale = db_to_scale(min_volume)
return 20 * torch.log10(scale.clamp(min=min_scale))
class RelativeVolumeMel(nn.Module):
"""Relative volume melspectrogram measure.
Computes a measure of distance over two mel spectrogram that is interpretable in terms
of decibels. Given `x_ref` and `x_est` two waveforms of shape `[*, T]`, it will
first renormalize both by the ground truth of `x_ref`.
..Warning:: This class returns the volume of the distortion at the spectrogram level,
e.g. low negative values reflects lower distortion levels. For a SNR (like reported
in the MultiBandDiffusion paper), just take `-rvm`.
Then it computes the mel spectrogram `z_ref` and `z_est` and compute volume of the difference
relative to the volume of `z_ref` for each time-frequency bin. It further adds some limits, e.g.
clamping the values between -25 and 25 dB (controlled by `min_relative_volume` and `max_relative_volume`)
with the goal of avoiding the loss being dominated by parts where the reference is almost silent.
Indeed, volumes in dB can take unbounded values both towards -oo and +oo, which can make the final
average metric harder to interpret. Besides, anything below -30 dB of attenuation would sound extremely
good (for a neural network output, although sound engineers typically aim for much lower attenuations).
Similarly, anything above +30 dB would just be completely missing the target, and there is no point
in measuring by exactly how much it missed it. -25, 25 is a more conservative range, but also more
in line with what neural nets currently can achieve.
For instance, a Relative Volume Mel (RVM) score of -10 dB means that on average, the delta between
the target and reference mel-spec is 10 dB lower than the reference mel-spec value.
The metric can be aggregated over a given frequency band in order have different insights for
different region of the spectrum. `num_aggregated_bands` controls the number of bands.
..Warning:: While this function is optimized for interpretability, nothing was done to ensure it
is numerically stable when computing its gradient. We thus advise against using it as a training loss.
Args:
sample_rate (int): Sample rate of the input audio.
n_mels (int): Number of mel bands to use.
n_fft (int): Number of frequency bins for the STFT.
hop_length (int): Hop length of the STFT and the mel-spectrogram.
min_relative_volume (float): The error `z_ref - z_est` volume is given relative to
the volume of `z_ref`. If error is smaller than -25 dB of `z_ref`, then it is clamped.
max_relative_volume (float): Same as `min_relative_volume` but clamping if the error is larger than that.
max_initial_gain (float): When rescaling the audio at the very beginning, we will limit the gain
to that amount, to avoid rescaling near silence. Given in dB.
min_activity_volume (float): When computing the reference level from `z_ref`, will clamp low volume
bins to that amount. This is effectively our "zero" level for the reference mel-spectrogram,
and anything below that will be considered equally.
num_aggregated_bands (int): Number of bands to keep when computing the average RVM value.
For instance, a value of 3 would give 3 scores, roughly for low, mid and high freqs.
"""
def __init__(self, sample_rate: int = 24000, n_mels: int = 80, n_fft: int = 512,
hop_length: int = 128, min_relative_volume: float = -25,
max_relative_volume: float = 25, max_initial_gain: float = 25,
min_activity_volume: float = -25,
num_aggregated_bands: int = 4) -> None:
super().__init__()
self.melspec = torchaudio.transforms.MelSpectrogram(
n_mels=n_mels, n_fft=n_fft, hop_length=hop_length,
normalized=True, sample_rate=sample_rate, power=2)
self.min_relative_volume = min_relative_volume
self.max_relative_volume = max_relative_volume
self.max_initial_gain = max_initial_gain
self.min_activity_volume = min_activity_volume
self.num_aggregated_bands = num_aggregated_bands
def forward(self, estimate: torch.Tensor, ground_truth: torch.Tensor) -> tp.Dict[str, torch.Tensor]:
"""Compute RVM metric between estimate and reference samples.
Args:
estimate (torch.Tensor): Estimate sample.
ground_truth (torch.Tensor): Reference sample.
Returns:
dict[str, torch.Tensor]: Metrics with keys `rvm` for the overall average, and `rvm_{k}`
for the RVM over the k-th band (k=0..num_aggregated_bands - 1).
"""
min_scale = db_to_scale(-self.max_initial_gain)
std = ground_truth.pow(2).mean().sqrt().clamp(min=min_scale)
z_gt = self.melspec(ground_truth / std).sqrt()
z_est = self.melspec(estimate / std).sqrt()
delta = z_gt - z_est
ref_db = scale_to_db(z_gt, self.min_activity_volume)
delta_db = scale_to_db(delta.abs(), min_volume=-120)
relative_db = (delta_db - ref_db).clamp(self.min_relative_volume, self.max_relative_volume)
dims = list(range(relative_db.dim()))
dims.remove(dims[-2])
losses_per_band = relative_db.mean(dim=dims)
aggregated = [chunk.mean() for chunk in losses_per_band.chunk(self.num_aggregated_bands, dim=0)]
metrics = {f'rvm_{index}': value for index, value in enumerate(aggregated)}
metrics['rvm'] = losses_per_band.mean()
return metrics
| audiocraft-main | audiocraft/metrics/rvm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import typing as tp
import torch
from torch import nn
from torch.nn import functional as F
def _unfold(a: torch.Tensor, kernel_size: int, stride: int) -> torch.Tensor:
"""Given input of size [*OT, T], output Tensor of size [*OT, F, K]
with K the kernel size, by extracting frames with the given stride.
This will pad the input so that `F = ceil(T / K)`.
see https://github.com/pytorch/pytorch/issues/60466
"""
*shape, length = a.shape
n_frames = math.ceil(length / stride)
tgt_length = (n_frames - 1) * stride + kernel_size
a = F.pad(a, (0, tgt_length - length))
strides = list(a.stride())
assert strides[-1] == 1, "data should be contiguous"
strides = strides[:-1] + [stride, 1]
return a.as_strided([*shape, n_frames, kernel_size], strides)
def _center(x: torch.Tensor) -> torch.Tensor:
return x - x.mean(-1, True)
def _norm2(x: torch.Tensor) -> torch.Tensor:
return x.pow(2).sum(-1, True)
class SISNR(nn.Module):
"""SISNR loss.
Input should be [B, C, T], output is scalar.
..Warning:: This function returns the opposite of the SI-SNR (e.g. `-1 * regular_SI_SNR`).
Consequently, lower scores are better in terms of reconstruction quality,
in particular, it should be negative if training goes well. This done this way so
that this module can also be used as a loss function for training model.
Args:
sample_rate (int): Sample rate.
segment (float or None): Evaluate on chunks of that many seconds. If None, evaluate on
entire audio only.
overlap (float): Overlap between chunks, i.e. 0.5 = 50 % overlap.
epsilon (float): Epsilon value for numerical stability.
"""
def __init__(
self,
sample_rate: int = 16000,
segment: tp.Optional[float] = 20,
overlap: float = 0.5,
epsilon: float = torch.finfo(torch.float32).eps,
):
super().__init__()
self.sample_rate = sample_rate
self.segment = segment
self.overlap = overlap
self.epsilon = epsilon
def forward(self, out_sig: torch.Tensor, ref_sig: torch.Tensor) -> torch.Tensor:
B, C, T = ref_sig.shape
assert ref_sig.shape == out_sig.shape
if self.segment is None:
frame = T
stride = T
else:
frame = int(self.segment * self.sample_rate)
stride = int(frame * (1 - self.overlap))
epsilon = self.epsilon * frame # make epsilon prop to frame size.
gt = _unfold(ref_sig, frame, stride)
est = _unfold(out_sig, frame, stride)
if self.segment is None:
assert gt.shape[-1] == 1
gt = _center(gt)
est = _center(est)
dot = torch.einsum("bcft,bcft->bcf", gt, est)
proj = dot[:, :, :, None] * gt / (epsilon + _norm2(gt))
noise = est - proj
sisnr = 10 * (
torch.log10(epsilon + _norm2(proj)) - torch.log10(epsilon + _norm2(noise))
)
return -1 * sisnr[..., 0].mean()
| audiocraft-main | audiocraft/losses/sisnr.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Loss related classes and functions. In particular the loss balancer from
EnCodec, and the usual spectral losses."""
# flake8: noqa
from .balancer import Balancer
from .sisnr import SISNR
from .stftloss import (
LogSTFTMagnitudeLoss,
MRSTFTLoss,
SpectralConvergenceLoss,
STFTLoss
)
from .specloss import (
MelSpectrogramL1Loss,
MultiScaleMelSpectrogramLoss,
)
| audiocraft-main | audiocraft/losses/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from MIT code under the original license
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
import typing as tp
import torch
from torch import nn
from torch.nn import functional as F
# TODO: Replace with torchaudio.STFT?
def _stft(x: torch.Tensor, fft_size: int, hop_length: int, win_length: int,
window: tp.Optional[torch.Tensor], normalized: bool) -> torch.Tensor:
"""Perform STFT and convert to magnitude spectrogram.
Args:
x: Input signal tensor (B, C, T).
fft_size (int): FFT size.
hop_length (int): Hop size.
win_length (int): Window length.
window (torch.Tensor or None): Window function type.
normalized (bool): Whether to normalize the STFT or not.
Returns:
torch.Tensor: Magnitude spectrogram (B, C, #frames, fft_size // 2 + 1).
"""
B, C, T = x.shape
x_stft = torch.stft(
x.view(-1, T), fft_size, hop_length, win_length, window,
normalized=normalized, return_complex=True,
)
x_stft = x_stft.view(B, C, *x_stft.shape[1:])
real = x_stft.real
imag = x_stft.imag
# NOTE(kan-bayashi): clamp is needed to avoid nan or inf
return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
class SpectralConvergenceLoss(nn.Module):
"""Spectral convergence loss.
"""
def __init__(self, epsilon: float = torch.finfo(torch.float32).eps):
super().__init__()
self.epsilon = epsilon
def forward(self, x_mag: torch.Tensor, y_mag: torch.Tensor):
"""Calculate forward propagation.
Args:
x_mag: Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag: Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
torch.Tensor: Spectral convergence loss value.
"""
return torch.norm(y_mag - x_mag, p="fro") / (torch.norm(y_mag, p="fro") + self.epsilon)
class LogSTFTMagnitudeLoss(nn.Module):
"""Log STFT magnitude loss.
Args:
epsilon (float): Epsilon value for numerical stability.
"""
def __init__(self, epsilon: float = torch.finfo(torch.float32).eps):
super().__init__()
self.epsilon = epsilon
def forward(self, x_mag: torch.Tensor, y_mag: torch.Tensor):
"""Calculate forward propagation.
Args:
x_mag (torch.Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (torch.Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
torch.Tensor: Log STFT magnitude loss value.
"""
return F.l1_loss(torch.log(self.epsilon + y_mag), torch.log(self.epsilon + x_mag))
class STFTLosses(nn.Module):
"""STFT losses.
Args:
n_fft (int): Size of FFT.
hop_length (int): Hop length.
win_length (int): Window length.
window (str): Window function type.
normalized (bool): Whether to use normalized STFT or not.
epsilon (float): Epsilon for numerical stability.
"""
def __init__(self, n_fft: int = 1024, hop_length: int = 120, win_length: int = 600,
window: str = "hann_window", normalized: bool = False,
epsilon: float = torch.finfo(torch.float32).eps):
super().__init__()
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
self.normalized = normalized
self.register_buffer("window", getattr(torch, window)(win_length))
self.spectral_convergenge_loss = SpectralConvergenceLoss(epsilon)
self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss(epsilon)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> tp.Tuple[torch.Tensor, torch.Tensor]:
"""Calculate forward propagation.
Args:
x (torch.Tensor): Predicted signal (B, T).
y (torch.Tensor): Groundtruth signal (B, T).
Returns:
torch.Tensor: Spectral convergence loss value.
torch.Tensor: Log STFT magnitude loss value.
"""
x_mag = _stft(x, self.n_fft, self.hop_length,
self.win_length, self.window, self.normalized) # type: ignore
y_mag = _stft(y, self.n_fft, self.hop_length,
self.win_length, self.window, self.normalized) # type: ignore
sc_loss = self.spectral_convergenge_loss(x_mag, y_mag)
mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
return sc_loss, mag_loss
class STFTLoss(nn.Module):
"""Single Resolution STFT loss.
Args:
n_fft (int): Nb of FFT.
hop_length (int): Hop length.
win_length (int): Window length.
window (str): Window function type.
normalized (bool): Whether to use normalized STFT or not.
epsilon (float): Epsilon for numerical stability.
factor_sc (float): Coefficient for the spectral loss.
factor_mag (float): Coefficient for the magnitude loss.
"""
def __init__(self, n_fft: int = 1024, hop_length: int = 120, win_length: int = 600,
window: str = "hann_window", normalized: bool = False,
factor_sc: float = 0.1, factor_mag: float = 0.1,
epsilon: float = torch.finfo(torch.float32).eps):
super().__init__()
self.loss = STFTLosses(n_fft, hop_length, win_length, window, normalized, epsilon)
self.factor_sc = factor_sc
self.factor_mag = factor_mag
def forward(self, x: torch.Tensor, y: torch.Tensor) -> tp.Tuple[torch.Tensor, torch.Tensor]:
"""Calculate forward propagation.
Args:
x (torch.Tensor): Predicted signal (B, T).
y (torch.Tensor): Groundtruth signal (B, T).
Returns:
torch.Tensor: Single resolution STFT loss.
"""
sc_loss, mag_loss = self.loss(x, y)
return self.factor_sc * sc_loss + self.factor_mag * mag_loss
class MRSTFTLoss(nn.Module):
"""Multi resolution STFT loss.
Args:
n_ffts (Sequence[int]): Sequence of FFT sizes.
hop_lengths (Sequence[int]): Sequence of hop sizes.
win_lengths (Sequence[int]): Sequence of window lengths.
window (str): Window function type.
factor_sc (float): Coefficient for the spectral loss.
factor_mag (float): Coefficient for the magnitude loss.
normalized (bool): Whether to use normalized STFT or not.
epsilon (float): Epsilon for numerical stability.
"""
def __init__(self, n_ffts: tp.Sequence[int] = [1024, 2048, 512], hop_lengths: tp.Sequence[int] = [120, 240, 50],
win_lengths: tp.Sequence[int] = [600, 1200, 240], window: str = "hann_window",
factor_sc: float = 0.1, factor_mag: float = 0.1,
normalized: bool = False, epsilon: float = torch.finfo(torch.float32).eps):
super().__init__()
assert len(n_ffts) == len(hop_lengths) == len(win_lengths)
self.stft_losses = torch.nn.ModuleList()
for fs, ss, wl in zip(n_ffts, hop_lengths, win_lengths):
self.stft_losses += [STFTLosses(fs, ss, wl, window, normalized, epsilon)]
self.factor_sc = factor_sc
self.factor_mag = factor_mag
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""Calculate forward propagation.
Args:
x (torch.Tensor): Predicted signal (B, T).
y (torch.Tensor): Groundtruth signal (B, T).
Returns:
torch.Tensor: Multi resolution STFT loss.
"""
sc_loss = torch.Tensor([0.0])
mag_loss = torch.Tensor([0.0])
for f in self.stft_losses:
sc_l, mag_l = f(x, y)
sc_loss += sc_l
mag_loss += mag_l
sc_loss /= len(self.stft_losses)
mag_loss /= len(self.stft_losses)
return self.factor_sc * sc_loss + self.factor_mag * mag_loss
| audiocraft-main | audiocraft/losses/stftloss.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import numpy as np
from torchaudio.transforms import MelSpectrogram
import torch
from torch import nn
from torch.nn import functional as F
from ..modules import pad_for_conv1d
class MelSpectrogramWrapper(nn.Module):
"""Wrapper around MelSpectrogram torchaudio transform providing proper padding
and additional post-processing including log scaling.
Args:
n_mels (int): Number of mel bins.
n_fft (int): Number of fft.
hop_length (int): Hop size.
win_length (int): Window length.
n_mels (int): Number of mel bins.
sample_rate (int): Sample rate.
f_min (float or None): Minimum frequency.
f_max (float or None): Maximum frequency.
log (bool): Whether to scale with log.
normalized (bool): Whether to normalize the melspectrogram.
floor_level (float): Floor level based on human perception (default=1e-5).
"""
def __init__(self, n_fft: int = 1024, hop_length: int = 256, win_length: tp.Optional[int] = None,
n_mels: int = 80, sample_rate: float = 22050, f_min: float = 0.0, f_max: tp.Optional[float] = None,
log: bool = True, normalized: bool = False, floor_level: float = 1e-5):
super().__init__()
self.n_fft = n_fft
hop_length = int(hop_length)
self.hop_length = hop_length
self.mel_transform = MelSpectrogram(n_mels=n_mels, sample_rate=sample_rate, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, f_min=f_min, f_max=f_max, normalized=normalized,
window_fn=torch.hann_window, center=False)
self.floor_level = floor_level
self.log = log
def forward(self, x):
p = int((self.n_fft - self.hop_length) // 2)
if len(x.shape) == 2:
x = x.unsqueeze(1)
x = F.pad(x, (p, p), "reflect")
# Make sure that all the frames are full.
# The combination of `pad_for_conv1d` and the above padding
# will make the output of size ceil(T / hop).
x = pad_for_conv1d(x, self.n_fft, self.hop_length)
self.mel_transform.to(x.device)
mel_spec = self.mel_transform(x)
B, C, freqs, frame = mel_spec.shape
if self.log:
mel_spec = torch.log10(self.floor_level + mel_spec)
return mel_spec.reshape(B, C * freqs, frame)
class MelSpectrogramL1Loss(torch.nn.Module):
"""L1 Loss on MelSpectrogram.
Args:
sample_rate (int): Sample rate.
n_fft (int): Number of fft.
hop_length (int): Hop size.
win_length (int): Window length.
n_mels (int): Number of mel bins.
f_min (float or None): Minimum frequency.
f_max (float or None): Maximum frequency.
log (bool): Whether to scale with log.
normalized (bool): Whether to normalize the melspectrogram.
floor_level (float): Floor level value based on human perception (default=1e-5).
"""
def __init__(self, sample_rate: int, n_fft: int = 1024, hop_length: int = 256, win_length: int = 1024,
n_mels: int = 80, f_min: float = 0.0, f_max: tp.Optional[float] = None,
log: bool = True, normalized: bool = False, floor_level: float = 1e-5):
super().__init__()
self.l1 = torch.nn.L1Loss()
self.melspec = MelSpectrogramWrapper(n_fft=n_fft, hop_length=hop_length, win_length=win_length,
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
log=log, normalized=normalized, floor_level=floor_level)
def forward(self, x, y):
self.melspec.to(x.device)
s_x = self.melspec(x)
s_y = self.melspec(y)
return self.l1(s_x, s_y)
class MultiScaleMelSpectrogramLoss(nn.Module):
"""Multi-Scale spectrogram loss (msspec).
Args:
sample_rate (int): Sample rate.
range_start (int): Power of 2 to use for the first scale.
range_stop (int): Power of 2 to use for the last scale.
n_mels (int): Number of mel bins.
f_min (float): Minimum frequency.
f_max (float or None): Maximum frequency.
normalized (bool): Whether to normalize the melspectrogram.
alphas (bool): Whether to use alphas as coefficients or not.
floor_level (float): Floor level value based on human perception (default=1e-5).
"""
def __init__(self, sample_rate: int, range_start: int = 6, range_end: int = 11,
n_mels: int = 64, f_min: float = 0.0, f_max: tp.Optional[float] = None,
normalized: bool = False, alphas: bool = True, floor_level: float = 1e-5):
super().__init__()
l1s = list()
l2s = list()
self.alphas = list()
self.total = 0
self.normalized = normalized
for i in range(range_start, range_end):
l1s.append(
MelSpectrogramWrapper(n_fft=2 ** i, hop_length=(2 ** i) / 4, win_length=2 ** i,
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
log=False, normalized=normalized, floor_level=floor_level))
l2s.append(
MelSpectrogramWrapper(n_fft=2 ** i, hop_length=(2 ** i) / 4, win_length=2 ** i,
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
log=True, normalized=normalized, floor_level=floor_level))
if alphas:
self.alphas.append(np.sqrt(2 ** i - 1))
else:
self.alphas.append(1)
self.total += self.alphas[-1] + 1
self.l1s = nn.ModuleList(l1s)
self.l2s = nn.ModuleList(l2s)
def forward(self, x, y):
loss = 0.0
self.l1s.to(x.device)
self.l2s.to(x.device)
for i in range(len(self.alphas)):
s_x_1 = self.l1s[i](x)
s_y_1 = self.l1s[i](y)
s_x_2 = self.l2s[i](x)
s_y_2 = self.l2s[i](y)
loss += F.l1_loss(s_x_1, s_y_1) + self.alphas[i] * F.mse_loss(s_x_2, s_y_2)
if self.normalized:
loss = loss / self.total
return loss
| audiocraft-main | audiocraft/losses/specloss.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import flashy
import torch
from torch import autograd
class Balancer:
"""Loss balancer.
The loss balancer combines losses together to compute gradients for the backward.
Given `y = f(...)`, and a number of losses `l1(y, ...)`, `l2(y, ...)`, with `...`
not having any dependence on `f`, the balancer can efficiently normalize the partial gradients
`d l1 / d y`, `d l2 / dy` before summing them in order to achieve a desired ratio between
the losses. For instance if `weights = {'l1': 2, 'l2': 1}`, 66% of the gradient
going into `f(...)` will come from `l1` on average, and 33% from `l2`. This allows for an easy
interpration of the weights even if the intrisic scale of `l1`, `l2` ... is unknown.
Noting `g1 = d l1 / dy`, etc., the balanced gradient `G` will be
(with `avg` an exponential moving average over the updates),
G = sum_i total_norm * g_i / avg(||g_i||) * w_i / sum(w_i)
If `balance_grads` is False, this is deactivated, and instead the gradient will just be the
standard sum of the partial gradients with the given weights.
A call to the backward method of the balancer will compute the the partial gradients,
combining all the losses and potentially rescaling the gradients,
which can help stabilize the training and reason about multiple losses with varying scales.
The obtained gradient with respect to `y` is then back-propagated to `f(...)`.
Expected usage:
weights = {'loss_a': 1, 'loss_b': 4}
balancer = Balancer(weights, ...)
losses: dict = {}
losses['loss_a'] = compute_loss_a(x, y)
losses['loss_b'] = compute_loss_b(x, y)
if model.training():
effective_loss = balancer.backward(losses, x)
Args:
weights (dict[str, float]): Weight coefficient for each loss. The balancer expect the losses keys
from the backward method to match the weights keys to assign weight to each of the provided loss.
balance_grads (bool): Whether to rescale gradients so that weights reflect the fraction of the
overall gradient, rather than a constant multiplier.
total_norm (float): Reference norm when rescaling gradients, ignored otherwise.
emay_decay (float): EMA decay for averaging the norms.
per_batch_item (bool): Whether to compute the averaged norm per batch item or not. This only holds
when rescaling the gradients.
epsilon (float): Epsilon value for numerical stability.
monitor (bool): If True, stores in `self.metrics` the relative ratio between the norm of the gradients
coming from each loss, when calling `backward()`.
"""
def __init__(self, weights: tp.Dict[str, float], balance_grads: bool = True, total_norm: float = 1.,
ema_decay: float = 0.999, per_batch_item: bool = True, epsilon: float = 1e-12,
monitor: bool = False):
self.weights = weights
self.per_batch_item = per_batch_item
self.total_norm = total_norm or 1.
self.averager = flashy.averager(ema_decay or 1.)
self.epsilon = epsilon
self.monitor = monitor
self.balance_grads = balance_grads
self._metrics: tp.Dict[str, tp.Any] = {}
@property
def metrics(self):
return self._metrics
def backward(self, losses: tp.Dict[str, torch.Tensor], input: torch.Tensor) -> torch.Tensor:
"""Compute the backward and return the effective train loss, e.g. the loss obtained from
computing the effective weights. If `balance_grads` is True, the effective weights
are the one that needs to be applied to each gradient to respect the desired relative
scale of gradients coming from each loss.
Args:
losses (Dict[str, torch.Tensor]): dictionary with the same keys as `self.weights`.
input (torch.Tensor): the input of the losses, typically the output of the model.
This should be the single point of dependence between the losses
and the model being trained.
"""
norms = {}
grads = {}
for name, loss in losses.items():
# Compute partial derivative of the less with respect to the input.
grad, = autograd.grad(loss, [input], retain_graph=True)
if self.per_batch_item:
# We do not average the gradient over the batch dimension.
dims = tuple(range(1, grad.dim()))
norm = grad.norm(dim=dims, p=2).mean()
else:
norm = grad.norm(p=2)
norms[name] = norm
grads[name] = grad
count = 1
if self.per_batch_item:
count = len(grad)
# Average norms across workers. Theoretically we should average the
# squared norm, then take the sqrt, but it worked fine like that.
avg_norms = flashy.distrib.average_metrics(self.averager(norms), count)
# We approximate the total norm of the gradient as the sums of the norms.
# Obviously this can be very incorrect if all gradients are aligned, but it works fine.
total = sum(avg_norms.values())
self._metrics = {}
if self.monitor:
# Store the ratio of the total gradient represented by each loss.
for k, v in avg_norms.items():
self._metrics[f'ratio_{k}'] = v / total
total_weights = sum([self.weights[k] for k in avg_norms])
assert total_weights > 0.
desired_ratios = {k: w / total_weights for k, w in self.weights.items()}
out_grad = torch.zeros_like(input)
effective_loss = torch.tensor(0., device=input.device, dtype=input.dtype)
for name, avg_norm in avg_norms.items():
if self.balance_grads:
# g_balanced = g / avg(||g||) * total_norm * desired_ratio
scale = desired_ratios[name] * self.total_norm / (self.epsilon + avg_norm)
else:
# We just do regular weighted sum of the gradients.
scale = self.weights[name]
out_grad.add_(grads[name], alpha=scale)
effective_loss += scale * losses[name].detach()
# Send the computed partial derivative with respect to the output of the model to the model.
input.backward(out_grad)
return effective_loss
| audiocraft-main | audiocraft/losses/balancer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Adversarial losses and discriminator architectures."""
# flake8: noqa
from .discriminators import (
MultiPeriodDiscriminator,
MultiScaleDiscriminator,
MultiScaleSTFTDiscriminator
)
from .losses import (
AdversarialLoss,
AdvLossType,
get_adv_criterion,
get_fake_criterion,
get_real_criterion,
FeatLossType,
FeatureMatchingLoss
)
| audiocraft-main | audiocraft/adversarial/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility module to handle adversarial losses without requiring to mess up the main training loop.
"""
import typing as tp
import flashy
import torch
import torch.nn as nn
import torch.nn.functional as F
ADVERSARIAL_LOSSES = ['mse', 'hinge', 'hinge2']
AdvLossType = tp.Union[nn.Module, tp.Callable[[torch.Tensor], torch.Tensor]]
FeatLossType = tp.Union[nn.Module, tp.Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]
class AdversarialLoss(nn.Module):
"""Adversary training wrapper.
Args:
adversary (nn.Module): The adversary module will be used to estimate the logits given the fake and real samples.
We assume here the adversary output is ``Tuple[List[torch.Tensor], List[List[torch.Tensor]]]``
where the first item is a list of logits and the second item is a list of feature maps.
optimizer (torch.optim.Optimizer): Optimizer used for training the given module.
loss (AdvLossType): Loss function for generator training.
loss_real (AdvLossType): Loss function for adversarial training on logits from real samples.
loss_fake (AdvLossType): Loss function for adversarial training on logits from fake samples.
loss_feat (FeatLossType): Feature matching loss function for generator training.
normalize (bool): Whether to normalize by number of sub-discriminators.
Example of usage:
adv_loss = AdversarialLoss(adversaries, optimizer, loss, loss_real, loss_fake)
for real in loader:
noise = torch.randn(...)
fake = model(noise)
adv_loss.train_adv(fake, real)
loss, _ = adv_loss(fake, real)
loss.backward()
"""
def __init__(self,
adversary: nn.Module,
optimizer: torch.optim.Optimizer,
loss: AdvLossType,
loss_real: AdvLossType,
loss_fake: AdvLossType,
loss_feat: tp.Optional[FeatLossType] = None,
normalize: bool = True):
super().__init__()
self.adversary: nn.Module = adversary
flashy.distrib.broadcast_model(self.adversary)
self.optimizer = optimizer
self.loss = loss
self.loss_real = loss_real
self.loss_fake = loss_fake
self.loss_feat = loss_feat
self.normalize = normalize
def _save_to_state_dict(self, destination, prefix, keep_vars):
# Add the optimizer state dict inside our own.
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'optimizer'] = self.optimizer.state_dict()
return destination
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
# Load optimizer state.
self.optimizer.load_state_dict(state_dict.pop(prefix + 'optimizer'))
super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
def get_adversary_pred(self, x):
"""Run adversary model, validating expected output format."""
logits, fmaps = self.adversary(x)
assert isinstance(logits, list) and all([isinstance(t, torch.Tensor) for t in logits]), \
f'Expecting a list of tensors as logits but {type(logits)} found.'
assert isinstance(fmaps, list), f'Expecting a list of features maps but {type(fmaps)} found.'
for fmap in fmaps:
assert isinstance(fmap, list) and all([isinstance(f, torch.Tensor) for f in fmap]), \
f'Expecting a list of tensors as feature maps but {type(fmap)} found.'
return logits, fmaps
def train_adv(self, fake: torch.Tensor, real: torch.Tensor) -> torch.Tensor:
"""Train the adversary with the given fake and real example.
We assume the adversary output is the following format: Tuple[List[torch.Tensor], List[List[torch.Tensor]]].
The first item being the logits and second item being a list of feature maps for each sub-discriminator.
This will automatically synchronize gradients (with `flashy.distrib.eager_sync_model`)
and call the optimizer.
"""
loss = torch.tensor(0., device=fake.device)
all_logits_fake_is_fake, _ = self.get_adversary_pred(fake.detach())
all_logits_real_is_fake, _ = self.get_adversary_pred(real.detach())
n_sub_adversaries = len(all_logits_fake_is_fake)
for logit_fake_is_fake, logit_real_is_fake in zip(all_logits_fake_is_fake, all_logits_real_is_fake):
loss += self.loss_fake(logit_fake_is_fake) + self.loss_real(logit_real_is_fake)
if self.normalize:
loss /= n_sub_adversaries
self.optimizer.zero_grad()
with flashy.distrib.eager_sync_model(self.adversary):
loss.backward()
self.optimizer.step()
return loss
def forward(self, fake: torch.Tensor, real: torch.Tensor) -> tp.Tuple[torch.Tensor, torch.Tensor]:
"""Return the loss for the generator, i.e. trying to fool the adversary,
and feature matching loss if provided.
"""
adv = torch.tensor(0., device=fake.device)
feat = torch.tensor(0., device=fake.device)
with flashy.utils.readonly(self.adversary):
all_logits_fake_is_fake, all_fmap_fake = self.get_adversary_pred(fake)
all_logits_real_is_fake, all_fmap_real = self.get_adversary_pred(real)
n_sub_adversaries = len(all_logits_fake_is_fake)
for logit_fake_is_fake in all_logits_fake_is_fake:
adv += self.loss(logit_fake_is_fake)
if self.loss_feat:
for fmap_fake, fmap_real in zip(all_fmap_fake, all_fmap_real):
feat += self.loss_feat(fmap_fake, fmap_real)
if self.normalize:
adv /= n_sub_adversaries
feat /= n_sub_adversaries
return adv, feat
def get_adv_criterion(loss_type: str) -> tp.Callable:
assert loss_type in ADVERSARIAL_LOSSES
if loss_type == 'mse':
return mse_loss
elif loss_type == 'hinge':
return hinge_loss
elif loss_type == 'hinge2':
return hinge2_loss
raise ValueError('Unsupported loss')
def get_fake_criterion(loss_type: str) -> tp.Callable:
assert loss_type in ADVERSARIAL_LOSSES
if loss_type == 'mse':
return mse_fake_loss
elif loss_type in ['hinge', 'hinge2']:
return hinge_fake_loss
raise ValueError('Unsupported loss')
def get_real_criterion(loss_type: str) -> tp.Callable:
assert loss_type in ADVERSARIAL_LOSSES
if loss_type == 'mse':
return mse_real_loss
elif loss_type in ['hinge', 'hinge2']:
return hinge_real_loss
raise ValueError('Unsupported loss')
def mse_real_loss(x: torch.Tensor) -> torch.Tensor:
return F.mse_loss(x, torch.tensor(1., device=x.device).expand_as(x))
def mse_fake_loss(x: torch.Tensor) -> torch.Tensor:
return F.mse_loss(x, torch.tensor(0., device=x.device).expand_as(x))
def hinge_real_loss(x: torch.Tensor) -> torch.Tensor:
return -torch.mean(torch.min(x - 1, torch.tensor(0., device=x.device).expand_as(x)))
def hinge_fake_loss(x: torch.Tensor) -> torch.Tensor:
return -torch.mean(torch.min(-x - 1, torch.tensor(0., device=x.device).expand_as(x)))
def mse_loss(x: torch.Tensor) -> torch.Tensor:
if x.numel() == 0:
return torch.tensor([0.0], device=x.device)
return F.mse_loss(x, torch.tensor(1., device=x.device).expand_as(x))
def hinge_loss(x: torch.Tensor) -> torch.Tensor:
if x.numel() == 0:
return torch.tensor([0.0], device=x.device)
return -x.mean()
def hinge2_loss(x: torch.Tensor) -> torch.Tensor:
if x.numel() == 0:
return torch.tensor([0.0])
return -torch.mean(torch.min(x - 1, torch.tensor(0., device=x.device).expand_as(x)))
class FeatureMatchingLoss(nn.Module):
"""Feature matching loss for adversarial training.
Args:
loss (nn.Module): Loss to use for feature matching (default=torch.nn.L1).
normalize (bool): Whether to normalize the loss.
by number of feature maps.
"""
def __init__(self, loss: nn.Module = torch.nn.L1Loss(), normalize: bool = True):
super().__init__()
self.loss = loss
self.normalize = normalize
def forward(self, fmap_fake: tp.List[torch.Tensor], fmap_real: tp.List[torch.Tensor]) -> torch.Tensor:
assert len(fmap_fake) == len(fmap_real) and len(fmap_fake) > 0
feat_loss = torch.tensor(0., device=fmap_fake[0].device)
feat_scale = torch.tensor(0., device=fmap_fake[0].device)
n_fmaps = 0
for (feat_fake, feat_real) in zip(fmap_fake, fmap_real):
assert feat_fake.shape == feat_real.shape
n_fmaps += 1
feat_loss += self.loss(feat_fake, feat_real)
feat_scale += torch.mean(torch.abs(feat_real))
if self.normalize:
feat_loss /= n_fmaps
return feat_loss
| audiocraft-main | audiocraft/adversarial/losses.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...modules import NormConv2d
from .base import MultiDiscriminator, MultiDiscriminatorOutputType
def get_padding(kernel_size: int, dilation: int = 1) -> int:
return int((kernel_size * dilation - dilation) / 2)
class PeriodDiscriminator(nn.Module):
"""Period sub-discriminator.
Args:
period (int): Period between samples of audio.
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
n_layers (int): Number of convolutional layers.
kernel_sizes (list of int): Kernel sizes for convolutions.
stride (int): Stride for convolutions.
filters (int): Initial number of filters in convolutions.
filters_scale (int): Multiplier of number of filters as we increase depth.
max_filters (int): Maximum number of filters.
norm (str): Normalization method.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
"""
def __init__(self, period: int, in_channels: int = 1, out_channels: int = 1,
n_layers: int = 5, kernel_sizes: tp.List[int] = [5, 3], stride: int = 3,
filters: int = 8, filters_scale: int = 4, max_filters: int = 1024,
norm: str = 'weight_norm', activation: str = 'LeakyReLU',
activation_params: dict = {'negative_slope': 0.2}):
super().__init__()
self.period = period
self.n_layers = n_layers
self.activation = getattr(torch.nn, activation)(**activation_params)
self.convs = nn.ModuleList()
in_chs = in_channels
for i in range(self.n_layers):
out_chs = min(filters * (filters_scale ** (i + 1)), max_filters)
eff_stride = 1 if i == self.n_layers - 1 else stride
self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_sizes[0], 1), stride=(eff_stride, 1),
padding=((kernel_sizes[0] - 1) // 2, 0), norm=norm))
in_chs = out_chs
self.conv_post = NormConv2d(in_chs, out_channels, kernel_size=(kernel_sizes[1], 1), stride=1,
padding=((kernel_sizes[1] - 1) // 2, 0), norm=norm)
def forward(self, x: torch.Tensor):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), 'reflect')
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for conv in self.convs:
x = conv(x)
x = self.activation(x)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
# x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(MultiDiscriminator):
"""Multi-Period (MPD) Discriminator.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
periods (Sequence[int]): Periods between samples of audio for the sub-discriminators.
**kwargs: Additional args for `PeriodDiscriminator`
"""
def __init__(self, in_channels: int = 1, out_channels: int = 1,
periods: tp.Sequence[int] = [2, 3, 5, 7, 11], **kwargs):
super().__init__()
self.discriminators = nn.ModuleList([
PeriodDiscriminator(p, in_channels, out_channels, **kwargs) for p in periods
])
@property
def num_discriminators(self):
return len(self.discriminators)
def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType:
logits = []
fmaps = []
for disc in self.discriminators:
logit, fmap = disc(x)
logits.append(logit)
fmaps.append(fmap)
return logits, fmaps
| audiocraft-main | audiocraft/adversarial/discriminators/mpd.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torchaudio
import torch
from torch import nn
from einops import rearrange
from ...modules import NormConv2d
from .base import MultiDiscriminator, MultiDiscriminatorOutputType
def get_2d_padding(kernel_size: tp.Tuple[int, int], dilation: tp.Tuple[int, int] = (1, 1)):
return (((kernel_size[0] - 1) * dilation[0]) // 2, ((kernel_size[1] - 1) * dilation[1]) // 2)
class DiscriminatorSTFT(nn.Module):
"""STFT sub-discriminator.
Args:
filters (int): Number of filters in convolutions.
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
n_fft (int): Size of FFT for each scale.
hop_length (int): Length of hop between STFT windows for each scale.
kernel_size (tuple of int): Inner Conv2d kernel sizes.
stride (tuple of int): Inner Conv2d strides.
dilations (list of int): Inner Conv2d dilation on the time dimension.
win_length (int): Window size for each scale.
normalized (bool): Whether to normalize by magnitude after stft.
norm (str): Normalization method.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
growth (int): Growth factor for the filters.
"""
def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1,
n_fft: int = 1024, hop_length: int = 256, win_length: int = 1024, max_filters: int = 1024,
filters_scale: int = 1, kernel_size: tp.Tuple[int, int] = (3, 9), dilations: tp.List = [1, 2, 4],
stride: tp.Tuple[int, int] = (1, 2), normalized: bool = True, norm: str = 'weight_norm',
activation: str = 'LeakyReLU', activation_params: dict = {'negative_slope': 0.2}):
super().__init__()
assert len(kernel_size) == 2
assert len(stride) == 2
self.filters = filters
self.in_channels = in_channels
self.out_channels = out_channels
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
self.normalized = normalized
self.activation = getattr(torch.nn, activation)(**activation_params)
self.spec_transform = torchaudio.transforms.Spectrogram(
n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window_fn=torch.hann_window,
normalized=self.normalized, center=False, pad_mode=None, power=None)
spec_channels = 2 * self.in_channels
self.convs = nn.ModuleList()
self.convs.append(
NormConv2d(spec_channels, self.filters, kernel_size=kernel_size, padding=get_2d_padding(kernel_size))
)
in_chs = min(filters_scale * self.filters, max_filters)
for i, dilation in enumerate(dilations):
out_chs = min((filters_scale ** (i + 1)) * self.filters, max_filters)
self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride,
dilation=(dilation, 1), padding=get_2d_padding(kernel_size, (dilation, 1)),
norm=norm))
in_chs = out_chs
out_chs = min((filters_scale ** (len(dilations) + 1)) * self.filters, max_filters)
self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_size[0], kernel_size[0]),
padding=get_2d_padding((kernel_size[0], kernel_size[0])),
norm=norm))
self.conv_post = NormConv2d(out_chs, self.out_channels,
kernel_size=(kernel_size[0], kernel_size[0]),
padding=get_2d_padding((kernel_size[0], kernel_size[0])),
norm=norm)
def forward(self, x: torch.Tensor):
fmap = []
z = self.spec_transform(x) # [B, 2, Freq, Frames, 2]
z = torch.cat([z.real, z.imag], dim=1)
z = rearrange(z, 'b c w t -> b c t w')
for i, layer in enumerate(self.convs):
z = layer(z)
z = self.activation(z)
fmap.append(z)
z = self.conv_post(z)
return z, fmap
class MultiScaleSTFTDiscriminator(MultiDiscriminator):
"""Multi-Scale STFT (MS-STFT) discriminator.
Args:
filters (int): Number of filters in convolutions.
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
sep_channels (bool): Separate channels to distinct samples for stereo support.
n_ffts (Sequence[int]): Size of FFT for each scale.
hop_lengths (Sequence[int]): Length of hop between STFT windows for each scale.
win_lengths (Sequence[int]): Window size for each scale.
**kwargs: Additional args for STFTDiscriminator.
"""
def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1, sep_channels: bool = False,
n_ffts: tp.List[int] = [1024, 2048, 512], hop_lengths: tp.List[int] = [256, 512, 128],
win_lengths: tp.List[int] = [1024, 2048, 512], **kwargs):
super().__init__()
assert len(n_ffts) == len(hop_lengths) == len(win_lengths)
self.sep_channels = sep_channels
self.discriminators = nn.ModuleList([
DiscriminatorSTFT(filters, in_channels=in_channels, out_channels=out_channels,
n_fft=n_ffts[i], win_length=win_lengths[i], hop_length=hop_lengths[i], **kwargs)
for i in range(len(n_ffts))
])
@property
def num_discriminators(self):
return len(self.discriminators)
def _separate_channels(self, x: torch.Tensor) -> torch.Tensor:
B, C, T = x.shape
return x.view(-1, 1, T)
def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType:
logits = []
fmaps = []
for disc in self.discriminators:
logit, fmap = disc(x)
logits.append(logit)
fmaps.append(fmap)
return logits, fmaps
| audiocraft-main | audiocraft/adversarial/discriminators/msstftd.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import numpy as np
import torch
import torch.nn as nn
from ...modules import NormConv1d
from .base import MultiDiscriminator, MultiDiscriminatorOutputType
class ScaleDiscriminator(nn.Module):
"""Waveform sub-discriminator.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_sizes (Sequence[int]): Kernel sizes for first and last convolutions.
filters (int): Number of initial filters for convolutions.
max_filters (int): Maximum number of filters.
downsample_scales (Sequence[int]): Scale for downsampling implemented as strided convolutions.
inner_kernel_sizes (Sequence[int] or None): Kernel sizes for inner convolutions.
groups (Sequence[int] or None): Groups for inner convolutions.
strides (Sequence[int] or None): Strides for inner convolutions.
paddings (Sequence[int] or None): Paddings for inner convolutions.
norm (str): Normalization method.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
pad (str): Padding for initial convolution.
pad_params (dict): Parameters to provide to the padding module.
"""
def __init__(self, in_channels=1, out_channels=1, kernel_sizes: tp.Sequence[int] = [5, 3],
filters: int = 16, max_filters: int = 1024, downsample_scales: tp.Sequence[int] = [4, 4, 4, 4],
inner_kernel_sizes: tp.Optional[tp.Sequence[int]] = None, groups: tp.Optional[tp.Sequence[int]] = None,
strides: tp.Optional[tp.Sequence[int]] = None, paddings: tp.Optional[tp.Sequence[int]] = None,
norm: str = 'weight_norm', activation: str = 'LeakyReLU',
activation_params: dict = {'negative_slope': 0.2}, pad: str = 'ReflectionPad1d',
pad_params: dict = {}):
super().__init__()
assert len(kernel_sizes) == 2
assert kernel_sizes[0] % 2 == 1
assert kernel_sizes[1] % 2 == 1
assert (inner_kernel_sizes is None or len(inner_kernel_sizes) == len(downsample_scales))
assert (groups is None or len(groups) == len(downsample_scales))
assert (strides is None or len(strides) == len(downsample_scales))
assert (paddings is None or len(paddings) == len(downsample_scales))
self.activation = getattr(torch.nn, activation)(**activation_params)
self.convs = nn.ModuleList()
self.convs.append(
nn.Sequential(
getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params),
NormConv1d(in_channels, filters, kernel_size=np.prod(kernel_sizes), stride=1, norm=norm)
)
)
in_chs = filters
for i, downsample_scale in enumerate(downsample_scales):
out_chs = min(in_chs * downsample_scale, max_filters)
default_kernel_size = downsample_scale * 10 + 1
default_stride = downsample_scale
default_padding = (default_kernel_size - 1) // 2
default_groups = in_chs // 4
self.convs.append(
NormConv1d(in_chs, out_chs,
kernel_size=inner_kernel_sizes[i] if inner_kernel_sizes else default_kernel_size,
stride=strides[i] if strides else default_stride,
groups=groups[i] if groups else default_groups,
padding=paddings[i] if paddings else default_padding,
norm=norm))
in_chs = out_chs
out_chs = min(in_chs * 2, max_filters)
self.convs.append(NormConv1d(in_chs, out_chs, kernel_size=kernel_sizes[0], stride=1,
padding=(kernel_sizes[0] - 1) // 2, norm=norm))
self.conv_post = NormConv1d(out_chs, out_channels, kernel_size=kernel_sizes[1], stride=1,
padding=(kernel_sizes[1] - 1) // 2, norm=norm)
def forward(self, x: torch.Tensor):
fmap = []
for layer in self.convs:
x = layer(x)
x = self.activation(x)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
# x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(MultiDiscriminator):
"""Multi-Scale (MSD) Discriminator,
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
downsample_factor (int): Downsampling factor between the different scales.
scale_norms (Sequence[str]): Normalization for each sub-discriminator.
**kwargs: Additional args for ScaleDiscriminator.
"""
def __init__(self, in_channels: int = 1, out_channels: int = 1, downsample_factor: int = 2,
scale_norms: tp.Sequence[str] = ['weight_norm', 'weight_norm', 'weight_norm'], **kwargs):
super().__init__()
self.discriminators = nn.ModuleList([
ScaleDiscriminator(in_channels, out_channels, norm=norm, **kwargs) for norm in scale_norms
])
self.downsample = nn.AvgPool1d(downsample_factor * 2, downsample_factor, padding=downsample_factor)
@property
def num_discriminators(self):
return len(self.discriminators)
def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType:
logits = []
fmaps = []
for i, disc in enumerate(self.discriminators):
if i != 0:
self.downsample(x)
logit, fmap = disc(x)
logits.append(logit)
fmaps.append(fmap)
return logits, fmaps
| audiocraft-main | audiocraft/adversarial/discriminators/msd.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa
from .mpd import MultiPeriodDiscriminator
from .msd import MultiScaleDiscriminator
from .msstftd import MultiScaleSTFTDiscriminator
| audiocraft-main | audiocraft/adversarial/discriminators/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
import typing as tp
import torch
import torch.nn as nn
FeatureMapType = tp.List[torch.Tensor]
LogitsType = torch.Tensor
MultiDiscriminatorOutputType = tp.Tuple[tp.List[LogitsType], tp.List[FeatureMapType]]
class MultiDiscriminator(ABC, nn.Module):
"""Base implementation for discriminators composed of sub-discriminators acting at different scales.
"""
def __init__(self):
super().__init__()
@abstractmethod
def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType:
...
@property
@abstractmethod
def num_discriminators(self) -> int:
"""Number of discriminators.
"""
...
| audiocraft-main | audiocraft/adversarial/discriminators/base.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around FSDP for more convenient use in the training loops.
"""
from contextlib import contextmanager
import typing as tp
import dora
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import (
MixedPrecision, ShardingStrategy, FullStateDictConfig, StateDictType)
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
def is_fsdp_used() -> bool:
"""Return whether we are using FSDP."""
# A bit of a hack but should work from anywhere.
if dora.is_xp():
cfg = dora.get_xp().cfg
if hasattr(cfg, 'fsdp'):
return cfg.fsdp.use
return False
def is_sharded_tensor(x: tp.Any) -> bool:
return isinstance(x, ShardedTensor)
@contextmanager
def switch_to_full_state_dict(models: tp.List[FSDP]):
# Another bug in FSDP makes it that we cannot use the `state_dict_type` API,
# so let's do thing manually.
for model in models:
FSDP.set_state_dict_type( # type: ignore
model, StateDictType.FULL_STATE_DICT,
FullStateDictConfig(offload_to_cpu=True, rank0_only=True))
try:
yield
finally:
for model in models:
FSDP.set_state_dict_type(model, StateDictType.LOCAL_STATE_DICT) # type: ignore
def wrap_with_fsdp(cfg, model: torch.nn.Module,
block_classes: tp.Optional[tp.Set[tp.Type]] = None) -> FSDP:
"""Wraps a model with FSDP."""
# Some of the typing is disabled until this gets integrated
# into the stable version of PyTorch.
from torch.distributed.fsdp.wrap import ModuleWrapPolicy # type: ignore
# we import this here to prevent circular import.
from ..modules.transformer import StreamingTransformerLayer
from ..modules.conditioners import ConditioningProvider
_fix_post_backward_hook()
assert cfg.use
sharding_strategy_dict = {
"no_shard": ShardingStrategy.NO_SHARD,
"shard_grad_op": ShardingStrategy.SHARD_GRAD_OP,
"full_shard": ShardingStrategy.FULL_SHARD,
}
dtype_dict = {
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
}
mixed_precision_config = MixedPrecision(
param_dtype=dtype_dict[cfg.param_dtype],
reduce_dtype=dtype_dict[cfg.reduce_dtype],
buffer_dtype=dtype_dict[cfg.buffer_dtype],
)
sharding_strategy_config = sharding_strategy_dict[cfg.sharding_strategy]
# The following is going to require being a bit smart
# when doing LM, because this would flush the weights for every time step
# during generation. One possiblity is to use hybrid sharding:
# See: https://pytorch.org/docs/master/fsdp.html#torch.distributed.fsdp.ShardingStrategy
assert sharding_strategy_config != ShardingStrategy.FULL_SHARD, \
"Not supported at the moment, requires a bit more work."
local_rank = dora.distrib.get_distrib_spec().local_rank
assert local_rank < torch.cuda.device_count(), "Please upgrade Dora!"
auto_wrap_policy = None
if block_classes is None:
block_classes = {StreamingTransformerLayer, ConditioningProvider}
if cfg.per_block:
auto_wrap_policy = ModuleWrapPolicy(block_classes)
wrapped = _FSDPFixStateDict(
model,
sharding_strategy=sharding_strategy_config,
mixed_precision=mixed_precision_config,
device_id=local_rank,
sync_module_states=True,
use_orig_params=True,
auto_wrap_policy=auto_wrap_policy,
) # type: ignore
FSDP.set_state_dict_type(wrapped, StateDictType.LOCAL_STATE_DICT) # type: ignore
# Let the wrapped model know about the wrapping!
# We use __dict__ to avoid it going into the state dict.
# This is a bit dirty, but needed during generation, as otherwise
# the wrapped model would call itself and bypass FSDP.
for module in FSDP.fsdp_modules(wrapped):
original = module._fsdp_wrapped_module
original.__dict__['_fsdp'] = module
return wrapped
def purge_fsdp(model: FSDP):
"""Purge the FSDP cached shard inside the model. This should
allow setting the best state or switching to the EMA.
"""
from torch.distributed.fsdp._runtime_utils import _reshard # type: ignore
for module in FSDP.fsdp_modules(model):
handles = module._handles
if not handles:
continue
handle = handles[0]
unsharded_flat_param = handle._get_padded_unsharded_flat_param()
storage_size: int = unsharded_flat_param._typed_storage()._size() # type: ignore
if storage_size == 0:
continue
true_list = [True for h in handles]
_reshard(module, handles, true_list)
class _FSDPFixStateDict(FSDP):
@staticmethod
def _name_without_fsdp_prefix(name: str) -> str:
from torch.distributed.fsdp._common_utils import FSDP_WRAPPED_MODULE # type: ignore
parts = name.split('.')
new_parts = [part for part in parts if part != FSDP_WRAPPED_MODULE]
return '.'.join(new_parts)
def state_dict(self) -> tp.Dict[str, tp.Any]: # type: ignore
state = dict(super().state_dict())
for key, value in list(state.items()):
if is_sharded_tensor(value):
del state[key]
return state
def load_state_dict(self, state: tp.Dict[str, tp.Any]): # type: ignore
if self._state_dict_type is StateDictType.FULL_STATE_DICT:
super().load_state_dict(state)
purge_fsdp(self)
return
# Fix FSDP load state dict in all situation.
# Use this only with LOCAL_STATE_DICT !!!
current_state = dict(super().state_dict())
for key, value in state.items():
key = _FSDPFixStateDict._name_without_fsdp_prefix(key)
if key not in current_state:
# Emulate strict loading manually.
raise RuntimeError(f"Unknown state key {key}")
current_state[key].copy_(value)
# Purging cached weights from previous forward.
purge_fsdp(self)
_hook_fixed = False
def _fix_post_backward_hook():
global _hook_fixed
if _hook_fixed:
return
_hook_fixed = True
from torch.distributed.fsdp import _runtime_utils
from torch.distributed.fsdp._common_utils import TrainingState, HandleTrainingState
old_hook = _runtime_utils._post_backward_hook
def _post_backward_hook(state, handle, *args, **kwargs):
checkpointed = getattr(state._fsdp_wrapped_module, '_audiocraft_checkpointed', False)
if checkpointed:
# there will be one more forward in the backward with checkpointing and that will
# massively confuse FSDP, so we have to make it think everything
# is going according to the plan.
state.training_state = TrainingState.FORWARD_BACKWARD
handle._training_state = HandleTrainingState.BACKWARD_PRE
old_hook(state, handle, *args, **kwargs)
_runtime_utils._post_backward_hook = _post_backward_hook
| audiocraft-main | audiocraft/optim/fsdp.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# ModelEMA implementation is taken from
# https://github.com/facebookresearch/demucs
from collections import defaultdict
import typing as tp
import torch
import torch.nn as nn
def _get_all_non_persistent_buffers_set(module: nn.Module, root: str = "") -> set:
names: set = set()
for (name, sub_module) in module.named_modules():
if name == '':
buffer_names = module._non_persistent_buffers_set
buffer_names = {f"{root}.{buff_name}" if len(root) > 0 else buff_name
for buff_name in buffer_names}
names.update(buffer_names)
else:
sub_name = f"{root}.{name}" if len(root) > 0 else name
sub_buffer_names = _get_all_non_persistent_buffers_set(sub_module, sub_name)
names.update(sub_buffer_names)
return names
def _get_named_tensors(module: nn.Module):
non_persistent_buffers_set = _get_all_non_persistent_buffers_set(module)
named_buffers = [(name, buffer) for (name, buffer) in module.named_buffers()
if name not in non_persistent_buffers_set]
named_parameters = list(module.named_parameters())
return named_parameters + named_buffers
class ModuleDictEMA:
"""Exponential Moving Average over a nn.ModuleDict.
You can switch to the EMA weights temporarily.
"""
def __init__(self, module_dict: nn.ModuleDict, decay: float = 0.999,
unbias: bool = True, device: tp.Union[torch.device, str] = 'cpu'):
self.decay = decay
self.module_dict = module_dict
self.state: dict = defaultdict(dict)
self.count = 0
self.device = device
self.unbias = unbias
self._init()
def _init(self):
for module_name, module in self.module_dict.items():
for key, val in _get_named_tensors(module):
if not val.is_floating_point():
continue
device = self.device or val.device
if key not in self.state[module_name]:
self.state[module_name][key] = val.detach().to(device, copy=True)
def step(self):
if self.unbias:
self.count = self.count * self.decay + 1
w = 1 / self.count
else:
w = 1 - self.decay
for module_name, module in self.module_dict.items():
for key, val in _get_named_tensors(module):
if not val.is_floating_point():
continue
device = self.device or val.device
self.state[module_name][key].mul_(1 - w)
self.state[module_name][key].add_(val.detach().to(device), alpha=w)
def state_dict(self):
return {'state': self.state, 'count': self.count}
def load_state_dict(self, state):
self.count = state['count']
for module_name, module in state['state'].items():
for key, val in module.items():
self.state[module_name][key].copy_(val)
| audiocraft-main | audiocraft/optim/ema.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Optimization stuff. In particular, optimizers (DAdaptAdam), schedulers
and Exponential Moving Average.
"""
# flake8: noqa
from .cosine_lr_scheduler import CosineLRScheduler
from .dadam import DAdaptAdam
from .inverse_sqrt_lr_scheduler import InverseSquareRootLRScheduler
from .linear_warmup_lr_scheduler import LinearWarmupLRScheduler
from .polynomial_decay_lr_scheduler import PolynomialDecayLRScheduler
from .ema import ModuleDictEMA
| audiocraft-main | audiocraft/optim/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import TYPE_CHECKING, Any
import torch
import torch.optim
import torch.distributed as dist
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
logger = logging.getLogger(__name__)
def to_real(x):
if torch.is_complex(x):
return x.real
else:
return x
class DAdaptAdam(torch.optim.Optimizer):
"""Adam with D-Adaptation automatic step-sizes.
Leave LR set to 1 unless you encounter instability.
Args:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
betas (tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
momentum (float):
Momentum value in the range [0,1) (default: 0.9).
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-8).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
log_every (int):
Log using print every k steps, default 0 (no logging).
decouple (boolean):
Use AdamW style decoupled weight decay
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
growth_rate (float):
prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted. Values like 1.02 give a kind of learning
rate warmup effect.
fsdp_in_use (bool):
If you're using sharded parameters, this should be set to True. The optimizer
will attempt to auto-detect this, but if you're using an implementation other
than PyTorch's builtin version, the auto-detection won't work.
"""
def __init__(self, params, lr=1.0,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
log_every=0,
decouple=True,
d0=1e-6,
growth_rate=float('inf')):
if not 0.0 < d0:
raise ValueError("Invalid d0 value: {}".format(d0))
if not 0.0 < lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if decouple:
logger.info("Using decoupled weight decay")
from .fsdp import is_fsdp_used
fsdp_in_use = is_fsdp_used()
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
d=d0,
k=0,
gsq_weighted=0.0,
log_every=log_every,
decouple=decouple,
growth_rate=growth_rate,
fsdp_in_use=fsdp_in_use)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return False
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
g_sq = 0.0
sksq_weighted = 0.0
sk_l1 = 0.0
lr = max(group['lr'] for group in self.param_groups)
group = self.param_groups[0]
gsq_weighted = group['gsq_weighted']
d = group['d']
dlr = d*lr
growth_rate = group['growth_rate']
decouple = group['decouple']
fsdp_in_use = group['fsdp_in_use']
log_every = group['log_every']
beta1, beta2 = group['betas']
for group in self.param_groups:
group_lr = group['lr']
decay = group['weight_decay']
k = group['k']
eps = group['eps']
if group_lr not in [lr, 0.0]:
raise RuntimeError("Setting different lr values in different parameter "
"groups is only supported for values of 0")
for p in group['params']:
if p.grad is None:
continue
if hasattr(p, "_fsdp_flattened"):
fsdp_in_use = True
grad = p.grad.data
# Apply weight decay (coupled variant)
if decay != 0 and not decouple:
grad.add_(p.data, alpha=decay)
state = self.state[p]
# State initialization
if 'step' not in state:
state['step'] = 0
state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(
to_real(p.data), memory_format=torch.preserve_format).detach()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
grad_grad = to_real(grad * grad.conj())
# Adam EMA updates
if group_lr > 0:
exp_avg.mul_(beta1).add_(grad, alpha=dlr*(1-beta1))
exp_avg_sq.mul_(beta2).add_(grad_grad, alpha=1-beta2)
denom = exp_avg_sq.sqrt().add_(eps)
g_sq += grad_grad.div_(denom).sum().item()
s = state['s']
s.mul_(beta2).add_(grad, alpha=dlr*(1-beta2))
sksq_weighted += to_real(s * s.conj()).div_(denom).sum().item()
sk_l1 += s.abs().sum().item()
######
gsq_weighted = beta2*gsq_weighted + g_sq*(dlr**2)*(1-beta2)
d_hat = d
# if we have not done any progres, return
# if we have any gradients available, will have sk_l1 > 0 (unless \|g\|=0)
if sk_l1 == 0:
return loss
if lr > 0.0:
if fsdp_in_use:
dist_tensor = torch.zeros(3, device='cuda')
dist_tensor[0] = sksq_weighted
dist_tensor[1] = gsq_weighted
dist_tensor[2] = sk_l1
dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
global_sksq_weighted = dist_tensor[0]
global_gsq_weighted = dist_tensor[1]
global_sk_l1 = dist_tensor[2]
else:
global_sksq_weighted = sksq_weighted
global_gsq_weighted = gsq_weighted
global_sk_l1 = sk_l1
d_hat = (global_sksq_weighted/(1-beta2) - global_gsq_weighted)/global_sk_l1
d = max(d, min(d_hat, d*growth_rate))
if log_every > 0 and k % log_every == 0:
logger.info(
f"(k={k}) dlr: {dlr:1.1e} d_hat: {d_hat:1.1e}, d: {d:1.8}. "
f"sksq_weighted={global_sksq_weighted:1.1e} gsq_weighted={global_gsq_weighted:1.1e} "
f"sk_l1={global_sk_l1:1.1e}{' (FSDP)' if fsdp_in_use else ''}")
for group in self.param_groups:
group['gsq_weighted'] = gsq_weighted
group['d'] = d
group_lr = group['lr']
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
denom = exp_avg_sq.sqrt().add_(eps)
denom = denom.type(p.type())
# Apply weight decay (decoupled variant)
if decay != 0 and decouple and group_lr > 0:
p.data.add_(p.data, alpha=-decay * dlr)
# Take step
p.data.addcdiv_(exp_avg, denom, value=-1)
group['k'] = k + 1
return loss
| audiocraft-main | audiocraft/optim/dadam.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class InverseSquareRootLRScheduler(_LRScheduler):
"""Inverse square root LR scheduler.
Args:
optimizer (Optimizer): Torch optimizer.
warmup_steps (int): Number of warmup steps.
warmup_init_lr (tp.Optional[float]): Initial learning rate
during warmup phase. When not set, use the provided learning rate.
"""
def __init__(self, optimizer: Optimizer, warmup_steps: int, warmup_init_lr: tp.Optional[float] = 0):
self.warmup_steps = warmup_steps
self.warmup_init_lr = warmup_init_lr
super().__init__(optimizer)
def _get_sched_lr(self, lr: float, step: int):
if step < self.warmup_steps:
warmup_init_lr = self.warmup_init_lr or 0
lr_step = (lr - warmup_init_lr) / self.warmup_steps
lr = warmup_init_lr + step * lr_step
else:
decay_factor = lr * self.warmup_steps**0.5
lr = decay_factor * step**-0.5
return lr
def get_lr(self):
return [self._get_sched_lr(base_lr, self._step_count) for base_lr in self.base_lrs]
| audiocraft-main | audiocraft/optim/inverse_sqrt_lr_scheduler.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class CosineLRScheduler(_LRScheduler):
"""Cosine LR scheduler.
Args:
optimizer (Optimizer): Torch optimizer.
warmup_steps (int): Number of warmup steps.
total_steps (int): Total number of steps.
lr_min_ratio (float): Minimum learning rate.
cycle_length (float): Cycle length.
"""
def __init__(self, optimizer: Optimizer, total_steps: int, warmup_steps: int,
lr_min_ratio: float = 0.0, cycle_length: float = 1.0):
self.warmup_steps = warmup_steps
assert self.warmup_steps >= 0
self.total_steps = total_steps
assert self.total_steps >= 0
self.lr_min_ratio = lr_min_ratio
self.cycle_length = cycle_length
super().__init__(optimizer)
def _get_sched_lr(self, lr: float, step: int):
if step < self.warmup_steps:
lr_ratio = step / self.warmup_steps
lr = lr_ratio * lr
elif step <= self.total_steps:
s = (step - self.warmup_steps) / (self.total_steps - self.warmup_steps)
lr_ratio = self.lr_min_ratio + 0.5 * (1 - self.lr_min_ratio) * \
(1. + math.cos(math.pi * s / self.cycle_length))
lr = lr_ratio * lr
else:
lr_ratio = self.lr_min_ratio
lr = lr_ratio * lr
return lr
def get_lr(self):
return [self._get_sched_lr(lr, self.last_epoch) for lr in self.base_lrs]
| audiocraft-main | audiocraft/optim/cosine_lr_scheduler.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class PolynomialDecayLRScheduler(_LRScheduler):
"""Polynomial decay LR scheduler.
Args:
optimizer (Optimizer): Torch optimizer.
warmup_steps (int): Number of warmup steps.
total_steps (int): Total number of steps.
end_lr (float): Final learning rate to achieve over total number of steps.
zero_lr_warmup_steps (int): Number of steps with a learning rate of value 0.
power (float): Decay exponent.
"""
def __init__(self, optimizer: Optimizer, warmup_steps: int, total_steps: int,
end_lr: float = 0., zero_lr_warmup_steps: int = 0, power: float = 1.):
self.warmup_steps = warmup_steps
self.total_steps = total_steps
self.end_lr = end_lr
self.zero_lr_warmup_steps = zero_lr_warmup_steps
self.power = power
super().__init__(optimizer)
def _get_sched_lr(self, lr: float, step: int):
if self.zero_lr_warmup_steps > 0 and step <= self.zero_lr_warmup_steps:
lr = 0
elif self.warmup_steps > 0 and step <= self.warmup_steps + self.zero_lr_warmup_steps:
lr_ratio = (step - self.zero_lr_warmup_steps) / float(self.warmup_steps)
lr = lr_ratio * lr
elif step >= self.total_steps:
lr = self.end_lr
else:
total_warmup_steps = self.warmup_steps + self.zero_lr_warmup_steps
lr_range = lr - self.end_lr
pct_remaining = 1 - (step - total_warmup_steps) / (self.total_steps - total_warmup_steps)
lr = lr_range * pct_remaining ** self.power + self.end_lr
return lr
def get_lr(self):
return [self._get_sched_lr(base_lr, self.last_epoch) for base_lr in self.base_lrs]
| audiocraft-main | audiocraft/optim/polynomial_decay_lr_scheduler.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class LinearWarmupLRScheduler(_LRScheduler):
"""Inverse square root LR scheduler.
Args:
optimizer (Optimizer): Torch optimizer.
warmup_steps (int): Number of warmup steps.
warmup_init_lr (tp.Optional[float]): Initial learning rate
during warmup phase. When not set, use the provided learning rate.
"""
def __init__(self, optimizer: Optimizer, warmup_steps: int, warmup_init_lr: tp.Optional[float] = 0):
self.warmup_steps = warmup_steps
self.warmup_init_lr = warmup_init_lr
super().__init__(optimizer)
def _get_sched_lr(self, lr: float, step: int):
if step < self.warmup_steps:
warmup_init_lr = self.warmup_init_lr or 0
lr_step = (lr - warmup_init_lr) / self.warmup_steps
lr = warmup_init_lr + step * lr_step
return lr
def get_lr(self):
return [self._get_sched_lr(base_lr, self.last_epoch) for base_lr in self.base_lrs]
| audiocraft-main | audiocraft/optim/linear_warmup_lr_scheduler.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
All the functions to build the relevant solvers and used objects
from the Hydra config.
"""
from enum import Enum
import logging
import typing as tp
import dora
import flashy
import omegaconf
import torch
from torch import nn
from torch.optim import Optimizer
# LRScheduler was renamed in some torch versions
try:
from torch.optim.lr_scheduler import LRScheduler # type: ignore
except ImportError:
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
from .base import StandardSolver
from .. import adversarial, data, losses, metrics, optim
from ..utils.utils import dict_from_config, get_loader
logger = logging.getLogger(__name__)
class DatasetType(Enum):
AUDIO = "audio"
MUSIC = "music"
SOUND = "sound"
def get_solver(cfg: omegaconf.DictConfig) -> StandardSolver:
"""Instantiate solver from config."""
from .audiogen import AudioGenSolver
from .compression import CompressionSolver
from .musicgen import MusicGenSolver
from .diffusion import DiffusionSolver
klass = {
'compression': CompressionSolver,
'musicgen': MusicGenSolver,
'audiogen': AudioGenSolver,
'lm': MusicGenSolver, # backward compatibility
'diffusion': DiffusionSolver,
'sound_lm': AudioGenSolver, # backward compatibility
}[cfg.solver]
return klass(cfg) # type: ignore
def get_optim_parameter_groups(model: nn.Module):
"""Create parameter groups for the model using the appropriate method
if defined for each modules, to create the different groups.
Args:
model (nn.Module): torch model
Returns:
List of parameter groups
"""
seen_params: tp.Set[nn.parameter.Parameter] = set()
other_params = []
groups = []
for name, module in model.named_modules():
if hasattr(module, 'make_optim_group'):
group = module.make_optim_group()
params = set(group['params'])
assert params.isdisjoint(seen_params)
seen_params |= set(params)
groups.append(group)
for param in model.parameters():
if param not in seen_params:
other_params.append(param)
groups.insert(0, {'params': other_params})
parameters = groups
return parameters
def get_optimizer(params: tp.Union[nn.Module, tp.Iterable[torch.Tensor]], cfg: omegaconf.DictConfig) -> Optimizer:
"""Build torch optimizer from config and set of parameters.
Supported optimizers: Adam, AdamW
Args:
params (nn.Module or iterable of torch.Tensor): Parameters to optimize.
cfg (DictConfig): Optimization-related configuration.
Returns:
torch.optim.Optimizer.
"""
if 'optimizer' not in cfg:
if getattr(cfg, 'optim', None) is not None:
raise KeyError("Optimizer not found in config. Try instantiating optimizer from cfg.optim?")
else:
raise KeyError("Optimizer not found in config.")
parameters = get_optim_parameter_groups(params) if isinstance(params, nn.Module) else params
optimizer: torch.optim.Optimizer
if cfg.optimizer == 'adam':
optimizer = torch.optim.Adam(parameters, lr=cfg.lr, **cfg.adam)
elif cfg.optimizer == 'adamw':
optimizer = torch.optim.AdamW(parameters, lr=cfg.lr, **cfg.adam)
elif cfg.optimizer == 'dadam':
optimizer = optim.DAdaptAdam(parameters, lr=cfg.lr, **cfg.adam)
else:
raise ValueError(f"Unsupported LR Scheduler: {cfg.lr_scheduler}")
return optimizer
def get_lr_scheduler(optimizer: torch.optim.Optimizer,
cfg: omegaconf.DictConfig,
total_updates: int) -> tp.Optional[LRScheduler]:
"""Build torch learning rate scheduler from config and associated optimizer.
Supported learning rate schedulers: ExponentialLRScheduler, PlateauLRScheduler
Args:
optimizer (torch.optim.Optimizer): Optimizer.
cfg (DictConfig): Schedule-related configuration.
total_updates (int): Total number of updates.
Returns:
torch.optim.Optimizer.
"""
if 'lr_scheduler' not in cfg:
raise KeyError("LR Scheduler not found in config")
lr_sched: tp.Optional[LRScheduler] = None
if cfg.lr_scheduler == 'step':
lr_sched = torch.optim.lr_scheduler.StepLR(optimizer, **cfg.step)
elif cfg.lr_scheduler == 'exponential':
lr_sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=cfg.exponential)
elif cfg.lr_scheduler == 'cosine':
kwargs = dict_from_config(cfg.cosine)
warmup_steps = kwargs.pop('warmup')
lr_sched = optim.CosineLRScheduler(
optimizer, warmup_steps=warmup_steps, total_steps=total_updates, **kwargs)
elif cfg.lr_scheduler == 'polynomial_decay':
kwargs = dict_from_config(cfg.polynomial_decay)
warmup_steps = kwargs.pop('warmup')
lr_sched = optim.PolynomialDecayLRScheduler(
optimizer, warmup_steps=warmup_steps, total_steps=total_updates, **kwargs)
elif cfg.lr_scheduler == 'inverse_sqrt':
kwargs = dict_from_config(cfg.inverse_sqrt)
warmup_steps = kwargs.pop('warmup')
lr_sched = optim.InverseSquareRootLRScheduler(optimizer, warmup_steps=warmup_steps, **kwargs)
elif cfg.lr_scheduler == 'linear_warmup':
kwargs = dict_from_config(cfg.linear_warmup)
warmup_steps = kwargs.pop('warmup')
lr_sched = optim.LinearWarmupLRScheduler(optimizer, warmup_steps=warmup_steps, **kwargs)
elif cfg.lr_scheduler is not None:
raise ValueError(f"Unsupported LR Scheduler: {cfg.lr_scheduler}")
return lr_sched
def get_ema(module_dict: nn.ModuleDict, cfg: omegaconf.DictConfig) -> tp.Optional[optim.ModuleDictEMA]:
"""Initialize Exponential Moving Average.
Args:
module_dict (nn.ModuleDict): ModuleDict for which to compute the EMA.
cfg (omegaconf.DictConfig): Optim EMA configuration.
Returns:
optim.ModuleDictEMA: EMA version of the ModuleDict.
"""
kw: tp.Dict[str, tp.Any] = dict(cfg)
use = kw.pop('use', False)
decay = kw.pop('decay', None)
device = kw.pop('device', None)
if not use:
return None
if len(module_dict) == 0:
raise ValueError("Trying to build EMA but an empty module_dict source is provided!")
ema_module = optim.ModuleDictEMA(module_dict, decay=decay, device=device)
return ema_module
def get_loss(loss_name: str, cfg: omegaconf.DictConfig):
"""Instantiate loss from configuration."""
klass = {
'l1': torch.nn.L1Loss,
'l2': torch.nn.MSELoss,
'mel': losses.MelSpectrogramL1Loss,
'mrstft': losses.MRSTFTLoss,
'msspec': losses.MultiScaleMelSpectrogramLoss,
'sisnr': losses.SISNR,
}[loss_name]
kwargs = dict(getattr(cfg, loss_name))
return klass(**kwargs)
def get_balancer(loss_weights: tp.Dict[str, float], cfg: omegaconf.DictConfig) -> losses.Balancer:
"""Instantiate loss balancer from configuration for the provided weights."""
kwargs: tp.Dict[str, tp.Any] = dict_from_config(cfg)
return losses.Balancer(loss_weights, **kwargs)
def get_adversary(name: str, cfg: omegaconf.DictConfig) -> nn.Module:
"""Initialize adversary from config."""
klass = {
'msd': adversarial.MultiScaleDiscriminator,
'mpd': adversarial.MultiPeriodDiscriminator,
'msstftd': adversarial.MultiScaleSTFTDiscriminator,
}[name]
adv_cfg: tp.Dict[str, tp.Any] = dict(getattr(cfg, name))
return klass(**adv_cfg)
def get_adversarial_losses(cfg) -> nn.ModuleDict:
"""Initialize dict of adversarial losses from config."""
device = cfg.device
adv_cfg = getattr(cfg, 'adversarial')
adversaries = adv_cfg.get('adversaries', [])
adv_loss_name = adv_cfg['adv_loss']
feat_loss_name = adv_cfg.get('feat_loss')
normalize = adv_cfg.get('normalize', True)
feat_loss: tp.Optional[adversarial.FeatureMatchingLoss] = None
if feat_loss_name:
assert feat_loss_name in ['l1', 'l2'], f"Feature loss only support L1 or L2 but {feat_loss_name} found."
loss = get_loss(feat_loss_name, cfg)
feat_loss = adversarial.FeatureMatchingLoss(loss, normalize)
loss = adversarial.get_adv_criterion(adv_loss_name)
loss_real = adversarial.get_real_criterion(adv_loss_name)
loss_fake = adversarial.get_fake_criterion(adv_loss_name)
adv_losses = nn.ModuleDict()
for adv_name in adversaries:
adversary = get_adversary(adv_name, cfg).to(device)
optimizer = get_optimizer(adversary.parameters(), cfg.optim)
adv_loss = adversarial.AdversarialLoss(
adversary,
optimizer,
loss=loss,
loss_real=loss_real,
loss_fake=loss_fake,
loss_feat=feat_loss,
normalize=normalize
)
adv_losses[adv_name] = adv_loss
return adv_losses
def get_visqol(cfg: omegaconf.DictConfig) -> metrics.ViSQOL:
"""Instantiate ViSQOL metric from config."""
kwargs = dict_from_config(cfg)
return metrics.ViSQOL(**kwargs)
def get_fad(cfg: omegaconf.DictConfig) -> metrics.FrechetAudioDistanceMetric:
"""Instantiate Frechet Audio Distance metric from config."""
kwargs = dict_from_config(cfg.tf)
xp = dora.get_xp()
kwargs['log_folder'] = xp.folder
return metrics.FrechetAudioDistanceMetric(**kwargs)
def get_kldiv(cfg: omegaconf.DictConfig) -> metrics.KLDivergenceMetric:
"""Instantiate KL-Divergence metric from config."""
kld_metrics = {
'passt': metrics.PasstKLDivergenceMetric,
}
klass = kld_metrics[cfg.model]
kwargs = dict_from_config(cfg.get(cfg.model))
return klass(**kwargs)
def get_text_consistency(cfg: omegaconf.DictConfig) -> metrics.TextConsistencyMetric:
"""Instantiate Text Consistency metric from config."""
text_consistency_metrics = {
'clap': metrics.CLAPTextConsistencyMetric
}
klass = text_consistency_metrics[cfg.model]
kwargs = dict_from_config(cfg.get(cfg.model))
return klass(**kwargs)
def get_chroma_cosine_similarity(cfg: omegaconf.DictConfig) -> metrics.ChromaCosineSimilarityMetric:
"""Instantiate Chroma Cosine Similarity metric from config."""
assert cfg.model == 'chroma_base', "Only support 'chroma_base' method for chroma cosine similarity metric"
kwargs = dict_from_config(cfg.get(cfg.model))
return metrics.ChromaCosineSimilarityMetric(**kwargs)
def get_audio_datasets(cfg: omegaconf.DictConfig,
dataset_type: DatasetType = DatasetType.AUDIO) -> tp.Dict[str, torch.utils.data.DataLoader]:
"""Build AudioDataset from configuration.
Args:
cfg (omegaconf.DictConfig): Configuration.
dataset_type: The type of dataset to create.
Returns:
dict[str, torch.utils.data.DataLoader]: Map of dataloader for each data split.
"""
dataloaders: dict = {}
sample_rate = cfg.sample_rate
channels = cfg.channels
seed = cfg.seed
max_sample_rate = cfg.datasource.max_sample_rate
max_channels = cfg.datasource.max_channels
assert cfg.dataset is not None, "Could not find dataset definition in config"
dataset_cfg = dict_from_config(cfg.dataset)
splits_cfg: dict = {}
splits_cfg['train'] = dataset_cfg.pop('train')
splits_cfg['valid'] = dataset_cfg.pop('valid')
splits_cfg['evaluate'] = dataset_cfg.pop('evaluate')
splits_cfg['generate'] = dataset_cfg.pop('generate')
execute_only_stage = cfg.get('execute_only', None)
for split, path in cfg.datasource.items():
if not isinstance(path, str):
continue # skipping this as not a path
if execute_only_stage is not None and split != execute_only_stage:
continue
logger.info(f"Loading audio data split {split}: {str(path)}")
assert (
cfg.sample_rate <= max_sample_rate
), f"Expecting a max sample rate of {max_sample_rate} for datasource but {sample_rate} found."
assert (
cfg.channels <= max_channels
), f"Expecting a max number of channels of {max_channels} for datasource but {channels} found."
split_cfg = splits_cfg[split]
split_kwargs = {k: v for k, v in split_cfg.items()}
kwargs = {**dataset_cfg, **split_kwargs} # split kwargs overrides default dataset_cfg
kwargs['sample_rate'] = sample_rate
kwargs['channels'] = channels
if kwargs.get('permutation_on_files') and cfg.optim.updates_per_epoch:
kwargs['num_samples'] = (
flashy.distrib.world_size() * cfg.dataset.batch_size * cfg.optim.updates_per_epoch)
num_samples = kwargs['num_samples']
shuffle = kwargs['shuffle']
return_info = kwargs.pop('return_info')
batch_size = kwargs.pop('batch_size', None)
num_workers = kwargs.pop('num_workers')
if dataset_type == DatasetType.MUSIC:
dataset = data.music_dataset.MusicDataset.from_meta(path, **kwargs)
elif dataset_type == DatasetType.SOUND:
dataset = data.sound_dataset.SoundDataset.from_meta(path, **kwargs)
elif dataset_type == DatasetType.AUDIO:
dataset = data.info_audio_dataset.InfoAudioDataset.from_meta(path, return_info=return_info, **kwargs)
else:
raise ValueError(f"Dataset type is unsupported: {dataset_type}")
loader = get_loader(
dataset,
num_samples,
batch_size=batch_size,
num_workers=num_workers,
seed=seed,
collate_fn=dataset.collater if return_info else None,
shuffle=shuffle,
)
dataloaders[split] = loader
return dataloaders
| audiocraft-main | audiocraft/solvers/builders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import flashy
import julius
import omegaconf
import torch
import torch.nn.functional as F
from . import builders
from . import base
from .. import models
from ..modules.diffusion_schedule import NoiseSchedule
from ..metrics import RelativeVolumeMel
from ..models.builders import get_processor
from ..utils.samples.manager import SampleManager
from ..solvers.compression import CompressionSolver
class PerStageMetrics:
"""Handle prompting the metrics per stage.
It outputs the metrics per range of diffusion states.
e.g. avg loss when t in [250, 500]
"""
def __init__(self, num_steps: int, num_stages: int = 4):
self.num_steps = num_steps
self.num_stages = num_stages
def __call__(self, losses: dict, step: tp.Union[int, torch.Tensor]):
if type(step) is int:
stage = int((step / self.num_steps) * self.num_stages)
return {f"{name}_{stage}": loss for name, loss in losses.items()}
elif type(step) is torch.Tensor:
stage_tensor = ((step / self.num_steps) * self.num_stages).long()
out: tp.Dict[str, float] = {}
for stage_idx in range(self.num_stages):
mask = (stage_tensor == stage_idx)
N = mask.sum()
stage_out = {}
if N > 0: # pass if no elements in the stage
for name, loss in losses.items():
stage_loss = (mask * loss).sum() / N
stage_out[f"{name}_{stage_idx}"] = stage_loss
out = {**out, **stage_out}
return out
class DataProcess:
"""Apply filtering or resampling.
Args:
initial_sr (int): Initial sample rate.
target_sr (int): Target sample rate.
use_resampling: Whether to use resampling or not.
use_filter (bool):
n_bands (int): Number of bands to consider.
idx_band (int):
device (torch.device or str):
cutoffs ():
boost (bool):
"""
def __init__(self, initial_sr: int = 24000, target_sr: int = 16000, use_resampling: bool = False,
use_filter: bool = False, n_bands: int = 4,
idx_band: int = 0, device: torch.device = torch.device('cpu'), cutoffs=None, boost=False):
"""Apply filtering or resampling
Args:
initial_sr (int): sample rate of the dataset
target_sr (int): sample rate after resampling
use_resampling (bool): whether or not performs resampling
use_filter (bool): when True filter the data to keep only one frequency band
n_bands (int): Number of bands used
cuts (none or list): The cutoff frequencies of the band filtering
if None then we use mel scale bands.
idx_band (int): index of the frequency band. 0 are lows ... (n_bands - 1) highs
boost (bool): make the data scale match our music dataset.
"""
assert idx_band < n_bands
self.idx_band = idx_band
if use_filter:
if cutoffs is not None:
self.filter = julius.SplitBands(sample_rate=initial_sr, cutoffs=cutoffs).to(device)
else:
self.filter = julius.SplitBands(sample_rate=initial_sr, n_bands=n_bands).to(device)
self.use_filter = use_filter
self.use_resampling = use_resampling
self.target_sr = target_sr
self.initial_sr = initial_sr
self.boost = boost
def process_data(self, x, metric=False):
if x is None:
return None
if self.boost:
x /= torch.clamp(x.std(dim=(1, 2), keepdim=True), min=1e-4)
x * 0.22
if self.use_filter and not metric:
x = self.filter(x)[self.idx_band]
if self.use_resampling:
x = julius.resample_frac(x, old_sr=self.initial_sr, new_sr=self.target_sr)
return x
def inverse_process(self, x):
"""Upsampling only."""
if self.use_resampling:
x = julius.resample_frac(x, old_sr=self.target_sr, new_sr=self.target_sr)
return x
class DiffusionSolver(base.StandardSolver):
"""Solver for compression task.
The diffusion task allows for MultiBand diffusion model training.
Args:
cfg (DictConfig): Configuration.
"""
def __init__(self, cfg: omegaconf.DictConfig):
super().__init__(cfg)
self.cfg = cfg
self.device = cfg.device
self.sample_rate: int = self.cfg.sample_rate
self.codec_model = CompressionSolver.model_from_checkpoint(
cfg.compression_model_checkpoint, device=self.device)
self.codec_model.set_num_codebooks(cfg.n_q)
assert self.codec_model.sample_rate == self.cfg.sample_rate, (
f"Codec model sample rate is {self.codec_model.sample_rate} but "
f"Solver sample rate is {self.cfg.sample_rate}."
)
assert self.codec_model.sample_rate == self.sample_rate, \
f"Sample rate of solver {self.sample_rate} and codec {self.codec_model.sample_rate} " \
"don't match."
self.sample_processor = get_processor(cfg.processor, sample_rate=self.sample_rate)
self.register_stateful('sample_processor')
self.sample_processor.to(self.device)
self.schedule = NoiseSchedule(
**cfg.schedule, device=self.device, sample_processor=self.sample_processor)
self.eval_metric: tp.Optional[torch.nn.Module] = None
self.rvm = RelativeVolumeMel()
self.data_processor = DataProcess(initial_sr=self.sample_rate, target_sr=cfg.resampling.target_sr,
use_resampling=cfg.resampling.use, cutoffs=cfg.filter.cutoffs,
use_filter=cfg.filter.use, n_bands=cfg.filter.n_bands,
idx_band=cfg.filter.idx_band, device=self.device)
@property
def best_metric_name(self) -> tp.Optional[str]:
if self._current_stage == "evaluate":
return 'rvm'
else:
return 'loss'
@torch.no_grad()
def get_condition(self, wav: torch.Tensor) -> torch.Tensor:
codes, scale = self.codec_model.encode(wav)
assert scale is None, "Scaled compression models not supported."
emb = self.codec_model.decode_latent(codes)
return emb
def build_model(self):
"""Build model and optimizer as well as optional Exponential Moving Average of the model.
"""
# Model and optimizer
self.model = models.builders.get_diffusion_model(self.cfg).to(self.device)
self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)
self.register_stateful('model', 'optimizer')
self.register_best_state('model')
self.register_ema('model')
def build_dataloaders(self):
"""Build audio dataloaders for each stage."""
self.dataloaders = builders.get_audio_datasets(self.cfg)
def show(self):
# TODO
raise NotImplementedError()
def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):
"""Perform one training or valid step on a given batch."""
x = batch.to(self.device)
loss_fun = F.mse_loss if self.cfg.loss.kind == 'mse' else F.l1_loss
condition = self.get_condition(x) # [bs, 128, T/hop, n_emb]
sample = self.data_processor.process_data(x)
input_, target, step = self.schedule.get_training_item(sample,
tensor_step=self.cfg.schedule.variable_step_batch)
out = self.model(input_, step, condition=condition).sample
base_loss = loss_fun(out, target, reduction='none').mean(dim=(1, 2))
reference_loss = loss_fun(input_, target, reduction='none').mean(dim=(1, 2))
loss = base_loss / reference_loss ** self.cfg.loss.norm_power
if self.is_training:
loss.mean().backward()
flashy.distrib.sync_model(self.model)
self.optimizer.step()
self.optimizer.zero_grad()
metrics = {
'loss': loss.mean(), 'normed_loss': (base_loss / reference_loss).mean(),
}
metrics.update(self.per_stage({'loss': loss, 'normed_loss': base_loss / reference_loss}, step))
metrics.update({
'std_in': input_.std(), 'std_out': out.std()})
return metrics
def run_epoch(self):
# reset random seed at the beginning of the epoch
self.rng = torch.Generator()
self.rng.manual_seed(1234 + self.epoch)
self.per_stage = PerStageMetrics(self.schedule.num_steps, self.cfg.metrics.num_stage)
# run epoch
super().run_epoch()
def evaluate(self):
"""Evaluate stage.
Runs audio reconstruction evaluation.
"""
self.model.eval()
evaluate_stage_name = f'{self.current_stage}'
loader = self.dataloaders['evaluate']
updates = len(loader)
lp = self.log_progress(f'{evaluate_stage_name} estimate', loader, total=updates, updates=self.log_updates)
metrics = {}
n = 1
for idx, batch in enumerate(lp):
x = batch.to(self.device)
with torch.no_grad():
y_pred = self.regenerate(x)
y_pred = y_pred.cpu()
y = batch.cpu() # should already be on CPU but just in case
rvm = self.rvm(y_pred, y)
lp.update(**rvm)
if len(metrics) == 0:
metrics = rvm
else:
for key in rvm.keys():
metrics[key] = (metrics[key] * n + rvm[key]) / (n + 1)
metrics = flashy.distrib.average_metrics(metrics)
return metrics
@torch.no_grad()
def regenerate(self, wav: torch.Tensor, step_list: tp.Optional[list] = None):
"""Regenerate the given waveform."""
condition = self.get_condition(wav)
initial = self.schedule.get_initial_noise(self.data_processor.process_data(wav)) # sampling rate changes.
result = self.schedule.generate_subsampled(self.model, initial=initial, condition=condition,
step_list=step_list)
result = self.data_processor.inverse_process(result)
return result
def generate(self):
"""Generate stage."""
sample_manager = SampleManager(self.xp)
self.model.eval()
generate_stage_name = f'{self.current_stage}'
loader = self.dataloaders['generate']
updates = len(loader)
lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)
for batch in lp:
reference, _ = batch
reference = reference.to(self.device)
estimate = self.regenerate(reference)
reference = reference.cpu()
estimate = estimate.cpu()
sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)
flashy.distrib.barrier()
| audiocraft-main | audiocraft/solvers/diffusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import time
import typing as tp
import flashy
import math
import omegaconf
import torch
from torch.nn import functional as F
from . import base, builders
from .compression import CompressionSolver
from .. import metrics as eval_metrics
from .. import models
from ..data.audio_dataset import AudioDataset
from ..data.music_dataset import MusicDataset, MusicInfo, AudioInfo
from ..data.audio_utils import normalize_audio
from ..modules.conditioners import JointEmbedCondition, SegmentWithAttributes, WavCondition
from ..utils.cache import CachedBatchWriter, CachedBatchLoader
from ..utils.samples.manager import SampleManager
from ..utils.utils import get_dataset_from_loader, is_jsonable, warn_once
class MusicGenSolver(base.StandardSolver):
"""Solver for MusicGen training task.
Used in: https://arxiv.org/abs/2306.05284
"""
DATASET_TYPE: builders.DatasetType = builders.DatasetType.MUSIC
def __init__(self, cfg: omegaconf.DictConfig):
super().__init__(cfg)
# easier access to sampling parameters
self.generation_params = {
'use_sampling': self.cfg.generate.lm.use_sampling,
'temp': self.cfg.generate.lm.temp,
'top_k': self.cfg.generate.lm.top_k,
'top_p': self.cfg.generate.lm.top_p,
}
self._best_metric_name: tp.Optional[str] = 'ce'
self._cached_batch_writer = None
self._cached_batch_loader = None
if cfg.cache.path:
if cfg.cache.write:
self._cached_batch_writer = CachedBatchWriter(Path(cfg.cache.path))
if self.cfg.cache.write_num_shards:
self.logger.warning("Multiple shard cache, best_metric_name will be set to None.")
self._best_metric_name = None
else:
self._cached_batch_loader = CachedBatchLoader(
Path(cfg.cache.path), cfg.dataset.batch_size, cfg.dataset.num_workers,
min_length=self.cfg.optim.updates_per_epoch or 1)
self.dataloaders['original_train'] = self.dataloaders['train']
self.dataloaders['train'] = self._cached_batch_loader # type: ignore
@staticmethod
def get_eval_solver_from_sig(sig: str, dtype: tp.Optional[str] = None,
device: tp.Optional[str] = None, autocast: bool = True,
batch_size: tp.Optional[int] = None,
override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None,
**kwargs):
"""Mostly a convenience function around magma.train.get_solver_from_sig,
populating all the proper param, deactivating EMA, FSDP, loading the best state,
basically all you need to get a solver ready to "play" with in single GPU mode
and with minimal memory overhead.
Args:
sig (str): signature to load.
dtype (str or None): potential dtype, as a string, i.e. 'float16'.
device (str or None): potential device, as a string, i.e. 'cuda'.
override_cfg (dict or omegaconf.DictConfig or None): potential device, as a string, i.e. 'cuda'.
"""
from audiocraft import train
our_override_cfg: tp.Dict[str, tp.Any] = {'optim': {'ema': {'use': False}}}
our_override_cfg['autocast'] = autocast
if dtype is not None:
our_override_cfg['dtype'] = dtype
if device is not None:
our_override_cfg['device'] = device
if batch_size is not None:
our_override_cfg['dataset'] = {'batch_size': batch_size}
if override_cfg is None:
override_cfg = {}
override_cfg = omegaconf.OmegaConf.merge(
omegaconf.DictConfig(override_cfg), omegaconf.DictConfig(our_override_cfg)) # type: ignore
solver = train.get_solver_from_sig(
sig, override_cfg=override_cfg,
load_best=True, disable_fsdp=True,
ignore_state_keys=['optimizer', 'ema'], **kwargs)
solver.model.eval()
return solver
def get_formatter(self, stage_name: str) -> flashy.Formatter:
return flashy.Formatter({
'lr': '.2E',
'ce': '.3f',
'ppl': '.3f',
'grad_norm': '.3E',
}, exclude_keys=['ce_q*', 'ppl_q*'])
@property
def best_metric_name(self) -> tp.Optional[str]:
return self._best_metric_name
def build_model(self) -> None:
"""Instantiate models and optimizer."""
# we can potentially not use all quantizers with which the EnCodec model was trained
# (e.g. we trained the model with quantizers dropout)
self.compression_model = CompressionSolver.wrapped_model_from_checkpoint(
self.cfg, self.cfg.compression_model_checkpoint, device=self.device)
assert self.compression_model.sample_rate == self.cfg.sample_rate, (
f"Compression model sample rate is {self.compression_model.sample_rate} but "
f"Solver sample rate is {self.cfg.sample_rate}."
)
# ensure we have matching configuration between LM and compression model
assert self.cfg.transformer_lm.card == self.compression_model.cardinality, (
"Cardinalities of the LM and compression model don't match: ",
f"LM cardinality is {self.cfg.transformer_lm.card} vs ",
f"compression model cardinality is {self.compression_model.cardinality}"
)
assert self.cfg.transformer_lm.n_q == self.compression_model.num_codebooks, (
"Numbers of codebooks of the LM and compression models don't match: ",
f"LM number of codebooks is {self.cfg.transformer_lm.n_q} vs ",
f"compression model numer of codebooks is {self.compression_model.num_codebooks}"
)
self.logger.info("Compression model has %d codebooks with %d cardinality, and a framerate of %d",
self.compression_model.num_codebooks, self.compression_model.cardinality,
self.compression_model.frame_rate)
# instantiate LM model
self.model: models.LMModel = models.builders.get_lm_model(self.cfg).to(self.device)
if self.cfg.fsdp.use:
assert not self.cfg.autocast, "Cannot use autocast with fsdp"
self.model = self.wrap_with_fsdp(self.model)
self.register_ema('model')
# initialize optimization
self.optimizer = builders.get_optimizer(builders.get_optim_parameter_groups(self.model), self.cfg.optim)
self.lr_scheduler = builders.get_lr_scheduler(self.optimizer, self.cfg.schedule, self.total_updates)
self.register_stateful('compression_model', 'model', 'optimizer', 'lr_scheduler')
self.register_best_state('model')
self.autocast_dtype = {
'float16': torch.float16, 'bfloat16': torch.bfloat16
}[self.cfg.autocast_dtype]
self.scaler: tp.Optional[torch.cuda.amp.GradScaler] = None
if self.cfg.fsdp.use:
need_scaler = self.cfg.fsdp.param_dtype == 'float16'
else:
need_scaler = self.cfg.autocast and self.autocast_dtype is torch.float16
if need_scaler:
if self.cfg.fsdp.use:
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
self.scaler = ShardedGradScaler() # type: ignore
else:
self.scaler = torch.cuda.amp.GradScaler()
self.register_stateful('scaler')
def build_dataloaders(self) -> None:
"""Instantiate audio dataloaders for each stage."""
self.dataloaders = builders.get_audio_datasets(self.cfg, dataset_type=self.DATASET_TYPE)
def show(self) -> None:
"""Show the compression model and LM model."""
self.logger.info("Compression model:")
self.log_model_summary(self.compression_model)
self.logger.info("LM model:")
self.log_model_summary(self.model)
def load_state_dict(self, state: dict) -> None:
if 'condition_provider' in state:
model_state = state['model']
condition_provider_state = state.pop('condition_provider')
prefix = 'condition_provider.'
for key, value in condition_provider_state.items():
key = prefix + key
assert key not in model_state
model_state[key] = value
super().load_state_dict(state)
def load_from_pretrained(self, name: str):
# TODO: support native HF versions of MusicGen.
lm_pkg = models.loaders.load_lm_model_ckpt(name)
state: dict = {
'best_state': {
'model': lm_pkg['best_state'],
},
}
return state
def _compute_cross_entropy(
self, logits: torch.Tensor, targets: torch.Tensor, mask: torch.Tensor
) -> tp.Tuple[torch.Tensor, tp.List[torch.Tensor]]:
"""Compute cross entropy between multi-codebook targets and model's logits.
The cross entropy is computed per codebook to provide codebook-level cross entropy.
Valid timesteps for each of the codebook are pulled from the mask, where invalid
timesteps are set to 0.
Args:
logits (torch.Tensor): Model's logits of shape [B, K, T, card].
targets (torch.Tensor): Target codes, of shape [B, K, T].
mask (torch.Tensor): Mask for valid target codes, of shape [B, K, T].
Returns:
ce (torch.Tensor): Cross entropy averaged over the codebooks
ce_per_codebook (list of torch.Tensor): Cross entropy per codebook (detached).
"""
B, K, T = targets.shape
assert logits.shape[:-1] == targets.shape
assert mask.shape == targets.shape
ce = torch.zeros([], device=targets.device)
ce_per_codebook: tp.List[torch.Tensor] = []
for k in range(K):
logits_k = logits[:, k, ...].contiguous().view(-1, logits.size(-1)) # [B x T, card]
targets_k = targets[:, k, ...].contiguous().view(-1) # [B x T]
mask_k = mask[:, k, ...].contiguous().view(-1) # [B x T]
ce_targets = targets_k[mask_k]
ce_logits = logits_k[mask_k]
q_ce = F.cross_entropy(ce_logits, ce_targets)
ce += q_ce
ce_per_codebook.append(q_ce.detach())
# average cross entropy across codebooks
ce = ce / K
return ce, ce_per_codebook
@torch.no_grad()
def _prepare_tokens_and_attributes(
self, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]],
check_synchronization_points: bool = False
) -> tp.Tuple[dict, torch.Tensor, torch.Tensor]:
"""Prepare input batchs for language model training.
Args:
batch (tuple[torch.Tensor, list[SegmentWithAttributes]]): Input batch with audio tensor of shape [B, C, T]
and corresponding metadata as SegmentWithAttributes (with B items).
check_synchronization_points (bool): Whether to check for synchronization points slowing down training.
Returns:
Condition tensors (dict[str, any]): Preprocessed condition attributes.
Tokens (torch.Tensor): Audio tokens from compression model, of shape [B, K, T_s],
with B the batch size, K the number of codebooks, T_s the token timesteps.
Padding mask (torch.Tensor): Mask with valid positions in the tokens tensor, of shape [B, K, T_s].
"""
if self._cached_batch_loader is None or self.current_stage != "train":
audio, infos = batch
audio = audio.to(self.device)
audio_tokens = None
assert audio.size(0) == len(infos), (
f"Mismatch between number of items in audio batch ({audio.size(0)})",
f" and in metadata ({len(infos)})"
)
else:
audio = None
# In that case the batch will be a tuple coming from the _cached_batch_writer bit below.
infos, = batch # type: ignore
assert all([isinstance(info, AudioInfo) for info in infos])
assert all([info.audio_tokens is not None for info in infos]) # type: ignore
audio_tokens = torch.stack([info.audio_tokens for info in infos]).to(self.device) # type: ignore
audio_tokens = audio_tokens.long()
for info in infos:
if isinstance(info, MusicInfo):
# Careful here, if you want to use this condition_wav (e.b. chroma conditioning),
# then you must be using the chroma cache! otherwise the code will try
# to use this segment and fail (by that I mean you will see NaN everywhere).
info.self_wav = WavCondition(
torch.full([1, info.channels, info.total_frames], float('NaN')),
length=torch.tensor([info.n_frames]),
sample_rate=[info.sample_rate],
path=[info.meta.path],
seek_time=[info.seek_time])
dataset = get_dataset_from_loader(self.dataloaders['original_train'])
assert isinstance(dataset, MusicDataset), type(dataset)
if dataset.paraphraser is not None and info.description is not None:
# Hackingly reapplying paraphraser when using cache.
info.description = dataset.paraphraser.sample_paraphrase(
info.meta.path, info.description)
# prepare attributes
attributes = [info.to_condition_attributes() for info in infos]
attributes = self.model.cfg_dropout(attributes)
attributes = self.model.att_dropout(attributes)
tokenized = self.model.condition_provider.tokenize(attributes)
# Now we should be synchronization free.
if self.device == "cuda" and check_synchronization_points:
torch.cuda.set_sync_debug_mode("warn")
if audio_tokens is None:
with torch.no_grad():
audio_tokens, scale = self.compression_model.encode(audio)
assert scale is None, "Scaled compression model not supported with LM."
with self.autocast:
condition_tensors = self.model.condition_provider(tokenized)
# create a padding mask to hold valid vs invalid positions
padding_mask = torch.ones_like(audio_tokens, dtype=torch.bool, device=audio_tokens.device)
# replace encodec tokens from padded audio with special_token_id
if self.cfg.tokens.padding_with_special_token:
audio_tokens = audio_tokens.clone()
padding_mask = padding_mask.clone()
token_sample_rate = self.compression_model.frame_rate
B, K, T_s = audio_tokens.shape
for i in range(B):
n_samples = infos[i].n_frames
audio_sample_rate = infos[i].sample_rate
# take the last token generated from actual audio frames (non-padded audio)
valid_tokens = math.floor(float(n_samples) / audio_sample_rate * token_sample_rate)
audio_tokens[i, :, valid_tokens:] = self.model.special_token_id
padding_mask[i, :, valid_tokens:] = 0
if self.device == "cuda" and check_synchronization_points:
torch.cuda.set_sync_debug_mode("default")
if self._cached_batch_writer is not None and self.current_stage == 'train':
assert self._cached_batch_loader is None
assert audio_tokens is not None
for info, one_audio_tokens in zip(infos, audio_tokens):
assert isinstance(info, AudioInfo)
if isinstance(info, MusicInfo):
assert not info.joint_embed, "joint_embed and cache not supported yet."
info.self_wav = None
assert one_audio_tokens.max() < 2**15, one_audio_tokens.max().item()
info.audio_tokens = one_audio_tokens.short().cpu()
self._cached_batch_writer.save(infos)
return condition_tensors, audio_tokens, padding_mask
def run_step(self, idx: int, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]], metrics: dict) -> dict:
"""Perform one training or valid step on a given batch."""
check_synchronization_points = idx == 1 and self.device == 'cuda'
condition_tensors, audio_tokens, padding_mask = self._prepare_tokens_and_attributes(
batch, check_synchronization_points)
self.deadlock_detect.update('tokens_and_conditions')
if check_synchronization_points:
torch.cuda.set_sync_debug_mode('warn')
with self.autocast:
model_output = self.model.compute_predictions(audio_tokens, [], condition_tensors) # type: ignore
logits = model_output.logits
mask = padding_mask & model_output.mask
ce, ce_per_codebook = self._compute_cross_entropy(logits, audio_tokens, mask)
loss = ce
self.deadlock_detect.update('loss')
if check_synchronization_points:
torch.cuda.set_sync_debug_mode('default')
if self.is_training:
metrics['lr'] = self.optimizer.param_groups[0]['lr']
if self.scaler is not None:
loss = self.scaler.scale(loss)
self.deadlock_detect.update('scale')
if self.cfg.fsdp.use:
loss.backward()
flashy.distrib.average_tensors(self.model.buffers())
elif self.cfg.optim.eager_sync:
with flashy.distrib.eager_sync_model(self.model):
loss.backward()
else:
# this should always be slower but can be useful
# for weird use cases like multiple backwards.
loss.backward()
flashy.distrib.sync_model(self.model)
self.deadlock_detect.update('backward')
if self.scaler is not None:
self.scaler.unscale_(self.optimizer)
if self.cfg.optim.max_norm:
if self.cfg.fsdp.use:
metrics['grad_norm'] = self.model.clip_grad_norm_(self.cfg.optim.max_norm) # type: ignore
else:
metrics['grad_norm'] = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.cfg.optim.max_norm
)
if self.scaler is None:
self.optimizer.step()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
if self.lr_scheduler:
self.lr_scheduler.step()
self.optimizer.zero_grad()
self.deadlock_detect.update('optim')
if self.scaler is not None:
scale = self.scaler.get_scale()
metrics['grad_scale'] = scale
if not loss.isfinite().all():
raise RuntimeError("Model probably diverged.")
metrics['ce'] = ce
metrics['ppl'] = torch.exp(ce)
for k, ce_q in enumerate(ce_per_codebook):
metrics[f'ce_q{k + 1}'] = ce_q
metrics[f'ppl_q{k + 1}'] = torch.exp(ce_q)
return metrics
@torch.no_grad()
def run_generate_step(self, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]],
gen_duration: float, prompt_duration: tp.Optional[float] = None,
remove_prompt: bool = False,
**generation_params) -> dict:
"""Run generate step on a batch of optional audio tensor and corresponding attributes.
Args:
batch (tuple[torch.Tensor, list[SegmentWithAttributes]]):
use_prompt (bool): Whether to do audio continuation generation with prompt from audio batch.
gen_duration (float): Target audio duration for the generation.
prompt_duration (float, optional): Duration for the audio prompt to use for continuation.
remove_prompt (bool, optional): Whether to remove the prompt from the generated audio.
generation_params: Additional generation parameters.
Returns:
gen_outputs (dict): Generation outputs, consisting in audio, audio tokens from both the generation
and the prompt along with additional information.
"""
bench_start = time.time()
audio, meta = batch
assert audio.size(0) == len(meta), (
f"Mismatch between number of items in audio batch ({audio.size(0)})",
f" and in metadata ({len(meta)})"
)
# prepare attributes
attributes = [x.to_condition_attributes() for x in meta]
# TODO: Add dropout for chroma?
# prepare audio prompt
if prompt_duration is None:
prompt_audio = None
else:
assert prompt_duration < gen_duration, "Prompt duration must be lower than target generation duration"
prompt_audio_frames = int(prompt_duration * self.compression_model.sample_rate)
prompt_audio = audio[..., :prompt_audio_frames]
# get audio tokens from compression model
if prompt_audio is None or prompt_audio.nelement() == 0:
num_samples = len(attributes)
prompt_tokens = None
else:
num_samples = None
prompt_audio = prompt_audio.to(self.device)
prompt_tokens, scale = self.compression_model.encode(prompt_audio)
assert scale is None, "Compression model in MusicGen should not require rescaling."
# generate by sampling from the LM
with self.autocast:
total_gen_len = math.ceil(gen_duration * self.compression_model.frame_rate)
gen_tokens = self.model.generate(
prompt_tokens, attributes, max_gen_len=total_gen_len,
num_samples=num_samples, **self.generation_params)
# generate audio from tokens
assert gen_tokens.dim() == 3
gen_audio = self.compression_model.decode(gen_tokens, None)
bench_end = time.time()
gen_outputs = {
'rtf': (bench_end - bench_start) / gen_duration,
'ref_audio': audio,
'gen_audio': gen_audio,
'gen_tokens': gen_tokens,
'prompt_audio': prompt_audio,
'prompt_tokens': prompt_tokens,
}
return gen_outputs
def generate_audio(self) -> dict:
"""Audio generation stage."""
generate_stage_name = f'{self.current_stage}'
sample_manager = SampleManager(self.xp)
self.logger.info(f"Generating samples in {sample_manager.base_folder}")
loader = self.dataloaders['generate']
updates = len(loader)
lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)
dataset = get_dataset_from_loader(loader)
dataset_duration = dataset.segment_duration
assert dataset_duration is not None
assert isinstance(dataset, AudioDataset)
target_duration = self.cfg.generate.lm.gen_duration
prompt_duration = self.cfg.generate.lm.prompt_duration
if target_duration is None:
target_duration = dataset_duration
if prompt_duration is None:
prompt_duration = dataset_duration / 4
assert prompt_duration < dataset_duration, (
f"Specified prompt duration ({prompt_duration}s) is longer",
f" than reference audio duration ({dataset_duration}s)"
)
def get_hydrated_conditions(meta: tp.List[SegmentWithAttributes]):
hydrated_conditions = []
for sample in [x.to_condition_attributes() for x in meta]:
cond_dict = {}
for cond_type in sample.__annotations__.keys():
for cond_key, cond_val in getattr(sample, cond_type).items():
if cond_key not in self.model.condition_provider.conditioners.keys():
continue
if is_jsonable(cond_val):
cond_dict[cond_key] = cond_val
elif isinstance(cond_val, WavCondition):
cond_dict[cond_key] = cond_val.path
elif isinstance(cond_val, JointEmbedCondition):
cond_dict[cond_key] = cond_val.text # only support text at inference for now
else:
# if we reached this point, it is not clear how to log the condition
# so we just log the type.
cond_dict[cond_key] = str(type(cond_val))
continue
hydrated_conditions.append(cond_dict)
return hydrated_conditions
metrics: dict = {}
average = flashy.averager()
for batch in lp:
audio, meta = batch
# metadata for sample manager
hydrated_conditions = get_hydrated_conditions(meta)
sample_generation_params = {
**{f'classifier_free_guidance_{k}': v for k, v in self.cfg.classifier_free_guidance.items()},
**self.generation_params
}
if self.cfg.generate.lm.unprompted_samples:
if self.cfg.generate.lm.gen_gt_samples:
# get the ground truth instead of generation
self.logger.warn(
"Use ground truth instead of audio generation as generate.lm.gen_gt_samples=true")
gen_unprompted_audio = audio
rtf = 1.
else:
gen_unprompted_outputs = self.run_generate_step(
batch, gen_duration=target_duration, prompt_duration=prompt_duration,
**self.generation_params)
gen_unprompted_audio = gen_unprompted_outputs['gen_audio'].cpu()
rtf = gen_unprompted_outputs['rtf']
sample_manager.add_samples(
gen_unprompted_audio, self.epoch, hydrated_conditions,
ground_truth_wavs=audio, generation_args=sample_generation_params)
if self.cfg.generate.lm.prompted_samples:
gen_outputs = self.run_generate_step(
batch, gen_duration=target_duration, prompt_duration=prompt_duration,
**self.generation_params)
gen_audio = gen_outputs['gen_audio'].cpu()
prompt_audio = gen_outputs['prompt_audio'].cpu()
sample_manager.add_samples(
gen_audio, self.epoch, hydrated_conditions,
prompt_wavs=prompt_audio, ground_truth_wavs=audio,
generation_args=sample_generation_params)
metrics['rtf'] = rtf
metrics = average(metrics)
flashy.distrib.barrier()
return metrics
def generate(self) -> dict:
"""Generate stage."""
self.model.eval()
with torch.no_grad():
return self.generate_audio()
def run_epoch(self):
if self.cfg.cache.write:
if ((self.epoch - 1) % self.cfg.cache.write_num_shards) != self.cfg.cache.write_shard:
return
super().run_epoch()
def train(self):
"""Train stage.
"""
if self._cached_batch_writer is not None:
self._cached_batch_writer.start_epoch(self.epoch)
if self._cached_batch_loader is None:
dataset = get_dataset_from_loader(self.dataloaders['train'])
assert isinstance(dataset, AudioDataset)
dataset.current_epoch = self.epoch
else:
self._cached_batch_loader.start_epoch(self.epoch)
return super().train()
def evaluate_audio_generation(self) -> dict:
"""Evaluate audio generation with off-the-shelf metrics."""
evaluate_stage_name = f'{self.current_stage}_generation'
# instantiate evaluation metrics, if at least one metric is defined, run audio generation evaluation
fad: tp.Optional[eval_metrics.FrechetAudioDistanceMetric] = None
kldiv: tp.Optional[eval_metrics.KLDivergenceMetric] = None
text_consistency: tp.Optional[eval_metrics.TextConsistencyMetric] = None
chroma_cosine: tp.Optional[eval_metrics.ChromaCosineSimilarityMetric] = None
should_run_eval = False
eval_chroma_wavs: tp.Optional[torch.Tensor] = None
if self.cfg.evaluate.metrics.fad:
fad = builders.get_fad(self.cfg.metrics.fad).to(self.device)
should_run_eval = True
if self.cfg.evaluate.metrics.kld:
kldiv = builders.get_kldiv(self.cfg.metrics.kld).to(self.device)
should_run_eval = True
if self.cfg.evaluate.metrics.text_consistency:
text_consistency = builders.get_text_consistency(self.cfg.metrics.text_consistency).to(self.device)
should_run_eval = True
if self.cfg.evaluate.metrics.chroma_cosine:
chroma_cosine = builders.get_chroma_cosine_similarity(self.cfg.metrics.chroma_cosine).to(self.device)
# if we have predefind wavs for chroma we should purge them for computing the cosine metric
has_predefined_eval_chromas = 'self_wav' in self.model.condition_provider.conditioners and \
self.model.condition_provider.conditioners['self_wav'].has_eval_wavs()
if has_predefined_eval_chromas:
warn_once(self.logger, "Attempting to run cosine eval for config with pre-defined eval chromas! "
'Resetting eval chromas to None for evaluation.')
eval_chroma_wavs = self.model.condition_provider.conditioners.self_wav.eval_wavs # type: ignore
self.model.condition_provider.conditioners.self_wav.reset_eval_wavs(None) # type: ignore
should_run_eval = True
def get_compressed_audio(audio: torch.Tensor) -> torch.Tensor:
audio_tokens, scale = self.compression_model.encode(audio.to(self.device))
compressed_audio = self.compression_model.decode(audio_tokens, scale)
return compressed_audio[..., :audio.shape[-1]]
metrics: dict = {}
if should_run_eval:
loader = self.dataloaders['evaluate']
updates = len(loader)
lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)
average = flashy.averager()
dataset = get_dataset_from_loader(loader)
assert isinstance(dataset, AudioDataset)
self.logger.info(f"Computing evaluation metrics on {len(dataset)} samples")
for idx, batch in enumerate(lp):
audio, meta = batch
assert all([self.cfg.sample_rate == m.sample_rate for m in meta])
target_duration = audio.shape[-1] / self.cfg.sample_rate
if self.cfg.evaluate.fixed_generation_duration:
target_duration = self.cfg.evaluate.fixed_generation_duration
gen_outputs = self.run_generate_step(
batch, gen_duration=target_duration,
**self.generation_params
)
y_pred = gen_outputs['gen_audio'].detach()
y_pred = y_pred[..., :audio.shape[-1]]
normalize_kwargs = dict(self.cfg.generate.audio)
normalize_kwargs.pop('format', None)
y_pred = torch.stack([normalize_audio(w, **normalize_kwargs) for w in y_pred], dim=0).cpu()
y = audio.cpu() # should already be on CPU but just in case
sizes = torch.tensor([m.n_frames for m in meta]) # actual sizes without padding
sample_rates = torch.tensor([m.sample_rate for m in meta]) # sample rates for audio samples
audio_stems = [Path(m.meta.path).stem + f"_{m.seek_time}" for m in meta]
if fad is not None:
if self.cfg.metrics.fad.use_gt:
y_pred = get_compressed_audio(y).cpu()
fad.update(y_pred, y, sizes, sample_rates, audio_stems)
if kldiv is not None:
if self.cfg.metrics.kld.use_gt:
y_pred = get_compressed_audio(y).cpu()
kldiv.update(y_pred, y, sizes, sample_rates)
if text_consistency is not None:
texts = [m.description for m in meta]
if self.cfg.metrics.text_consistency.use_gt:
y_pred = y
text_consistency.update(y_pred, texts, sizes, sample_rates)
if chroma_cosine is not None:
if self.cfg.metrics.chroma_cosine.use_gt:
y_pred = get_compressed_audio(y).cpu()
chroma_cosine.update(y_pred, y, sizes, sample_rates)
# restore chroma conditioner's eval chroma wavs
if eval_chroma_wavs is not None:
self.model.condition_provider.conditioners['self_wav'].reset_eval_wavs(eval_chroma_wavs)
flashy.distrib.barrier()
if fad is not None:
metrics['fad'] = fad.compute()
if kldiv is not None:
kld_metrics = kldiv.compute()
metrics.update(kld_metrics)
if text_consistency is not None:
metrics['text_consistency'] = text_consistency.compute()
if chroma_cosine is not None:
metrics['chroma_cosine'] = chroma_cosine.compute()
metrics = average(metrics)
metrics = flashy.distrib.average_metrics(metrics, len(loader))
return metrics
def evaluate(self) -> dict:
"""Evaluate stage."""
self.model.eval()
with torch.no_grad():
metrics: dict = {}
if self.cfg.evaluate.metrics.base:
metrics.update(self.common_train_valid('evaluate'))
gen_metrics = self.evaluate_audio_generation()
return {**metrics, **gen_metrics}
| audiocraft-main | audiocraft/solvers/musicgen.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
from pathlib import Path
import typing as tp
import flashy
import omegaconf
import torch
from torch import nn
from . import base, builders
from .. import models, quantization
from ..utils import checkpoint
from ..utils.samples.manager import SampleManager
from ..utils.utils import get_pool_executor
logger = logging.getLogger(__name__)
class CompressionSolver(base.StandardSolver):
"""Solver for compression task.
The compression task combines a set of perceptual and objective losses
to train an EncodecModel (composed of an encoder-decoder and a quantizer)
to perform high fidelity audio reconstruction.
"""
def __init__(self, cfg: omegaconf.DictConfig):
super().__init__(cfg)
self.rng: torch.Generator # set at each epoch
self.adv_losses = builders.get_adversarial_losses(self.cfg)
self.aux_losses = nn.ModuleDict()
self.info_losses = nn.ModuleDict()
assert not cfg.fsdp.use, "FSDP not supported by CompressionSolver."
loss_weights = dict()
for loss_name, weight in self.cfg.losses.items():
if loss_name in ['adv', 'feat']:
for adv_name, _ in self.adv_losses.items():
loss_weights[f'{loss_name}_{adv_name}'] = weight
elif weight > 0:
self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)
loss_weights[loss_name] = weight
else:
self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)
self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)
self.register_stateful('adv_losses')
@property
def best_metric_name(self) -> tp.Optional[str]:
# best model is the last for the compression model
return None
def build_model(self):
"""Instantiate model and optimizer."""
# Model and optimizer
self.model = models.builders.get_compression_model(self.cfg).to(self.device)
self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)
self.register_stateful('model', 'optimizer')
self.register_best_state('model')
self.register_ema('model')
def build_dataloaders(self):
"""Instantiate audio dataloaders for each stage."""
self.dataloaders = builders.get_audio_datasets(self.cfg)
def show(self):
"""Show the compression model and employed adversarial loss."""
self.logger.info(f"Compression model with {self.model.quantizer.total_codebooks} codebooks:")
self.log_model_summary(self.model)
self.logger.info("Adversarial loss:")
self.log_model_summary(self.adv_losses)
self.logger.info("Auxiliary losses:")
self.logger.info(self.aux_losses)
self.logger.info("Info losses:")
self.logger.info(self.info_losses)
def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):
"""Perform one training or valid step on a given batch."""
x = batch.to(self.device)
y = x.clone()
qres = self.model(x)
assert isinstance(qres, quantization.QuantizedResult)
y_pred = qres.x
# Log bandwidth in kb/s
metrics['bandwidth'] = qres.bandwidth.mean()
if self.is_training:
d_losses: dict = {}
if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:
for adv_name, adversary in self.adv_losses.items():
disc_loss = adversary.train_adv(y_pred, y)
d_losses[f'd_{adv_name}'] = disc_loss
metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))
metrics.update(d_losses)
balanced_losses: dict = {}
other_losses: dict = {}
# penalty from quantization
if qres.penalty is not None and qres.penalty.requires_grad:
other_losses['penalty'] = qres.penalty # penalty term from the quantizer
# adversarial losses
for adv_name, adversary in self.adv_losses.items():
adv_loss, feat_loss = adversary(y_pred, y)
balanced_losses[f'adv_{adv_name}'] = adv_loss
balanced_losses[f'feat_{adv_name}'] = feat_loss
# auxiliary losses
for loss_name, criterion in self.aux_losses.items():
loss = criterion(y_pred, y)
balanced_losses[loss_name] = loss
# weighted losses
metrics.update(balanced_losses)
metrics.update(other_losses)
metrics.update(qres.metrics)
if self.is_training:
# backprop losses that are not handled by balancer
other_loss = torch.tensor(0., device=self.device)
if 'penalty' in other_losses:
other_loss += other_losses['penalty']
if other_loss.requires_grad:
other_loss.backward(retain_graph=True)
ratio1 = sum(p.grad.data.norm(p=2).pow(2)
for p in self.model.parameters() if p.grad is not None)
assert isinstance(ratio1, torch.Tensor)
metrics['ratio1'] = ratio1.sqrt()
# balancer losses backward, returns effective training loss
# with effective weights at the current batch.
metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)
# add metrics corresponding to weight ratios
metrics.update(self.balancer.metrics)
ratio2 = sum(p.grad.data.norm(p=2).pow(2)
for p in self.model.parameters() if p.grad is not None)
assert isinstance(ratio2, torch.Tensor)
metrics['ratio2'] = ratio2.sqrt()
# optim
flashy.distrib.sync_model(self.model)
if self.cfg.optim.max_norm:
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.cfg.optim.max_norm
)
self.optimizer.step()
self.optimizer.zero_grad()
# informative losses only
info_losses: dict = {}
with torch.no_grad():
for loss_name, criterion in self.info_losses.items():
loss = criterion(y_pred, y)
info_losses[loss_name] = loss
metrics.update(info_losses)
# aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups
adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]
if len(adv_losses) > 0:
metrics['adv'] = torch.sum(torch.stack(adv_losses))
feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]
if len(feat_losses) > 0:
metrics['feat'] = torch.sum(torch.stack(feat_losses))
return metrics
def run_epoch(self):
# reset random seed at the beginning of the epoch
self.rng = torch.Generator()
self.rng.manual_seed(1234 + self.epoch)
# run epoch
super().run_epoch()
def evaluate(self):
"""Evaluate stage. Runs audio reconstruction evaluation."""
self.model.eval()
evaluate_stage_name = str(self.current_stage)
loader = self.dataloaders['evaluate']
updates = len(loader)
lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)
average = flashy.averager()
pendings = []
ctx = multiprocessing.get_context('spawn')
with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:
for idx, batch in enumerate(lp):
x = batch.to(self.device)
with torch.no_grad():
qres = self.model(x)
y_pred = qres.x.cpu()
y = batch.cpu() # should already be on CPU but just in case
pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))
metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)
for pending in metrics_lp:
metrics = pending.result()
metrics = average(metrics)
metrics = flashy.distrib.average_metrics(metrics, len(loader))
return metrics
def generate(self):
"""Generate stage."""
self.model.eval()
sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)
generate_stage_name = str(self.current_stage)
loader = self.dataloaders['generate']
updates = len(loader)
lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)
for batch in lp:
reference, _ = batch
reference = reference.to(self.device)
with torch.no_grad():
qres = self.model(reference)
assert isinstance(qres, quantization.QuantizedResult)
reference = reference.cpu()
estimate = qres.x.cpu()
sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)
flashy.distrib.barrier()
def load_from_pretrained(self, name: str) -> dict:
model = models.CompressionModel.get_pretrained(name)
if isinstance(model, models.DAC):
raise RuntimeError("Cannot fine tune a DAC model.")
elif isinstance(model, models.HFEncodecCompressionModel):
self.logger.warning('Trying to automatically convert a HuggingFace model '
'to AudioCraft, this might fail!')
state = model.model.state_dict()
new_state = {}
for k, v in state.items():
if k.startswith('decoder.layers') and '.conv.' in k and '.block.' not in k:
# We need to determine if this a convtr or a regular conv.
layer = int(k.split('.')[2])
if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):
k = k.replace('.conv.', '.convtr.')
k = k.replace('encoder.layers.', 'encoder.model.')
k = k.replace('decoder.layers.', 'decoder.model.')
k = k.replace('conv.', 'conv.conv.')
k = k.replace('convtr.', 'convtr.convtr.')
k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')
k = k.replace('.codebook.', '._codebook.')
new_state[k] = v
state = new_state
elif isinstance(model, models.EncodecModel):
state = model.state_dict()
else:
raise RuntimeError(f"Cannot fine tune model type {type(model)}.")
return {
'best_state': {'model': state}
}
@staticmethod
def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],
device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:
"""Instantiate a CompressionModel from a given checkpoint path or dora sig.
This method is a convenient endpoint to load a CompressionModel to use in other solvers.
Args:
checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.
This also supports pre-trained models by using a path of the form //pretrained/NAME.
See `model_from_pretrained` for a list of supported pretrained models.
use_ema (bool): Use EMA variant of the model instead of the actual model.
device (torch.device or str): Device on which the model is loaded.
"""
checkpoint_path = str(checkpoint_path)
if checkpoint_path.startswith('//pretrained/'):
name = checkpoint_path.split('/', 3)[-1]
return models.CompressionModel.get_pretrained(name, device)
logger = logging.getLogger(__name__)
logger.info(f"Loading compression model from checkpoint: {checkpoint_path}")
_checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)
assert _checkpoint_path is not None, f"Could not resolve compression model checkpoint path: {checkpoint_path}"
state = checkpoint.load_checkpoint(_checkpoint_path)
assert state is not None and 'xp.cfg' in state, f"Could not load compression model from ckpt: {checkpoint_path}"
cfg = state['xp.cfg']
cfg.device = device
compression_model = models.builders.get_compression_model(cfg).to(device)
assert compression_model.sample_rate == cfg.sample_rate, "Compression model sample rate should match"
assert 'best_state' in state and state['best_state'] != {}
assert 'exported' not in state, "When loading an exported checkpoint, use the //pretrained/ prefix."
compression_model.load_state_dict(state['best_state']['model'])
compression_model.eval()
logger.info("Compression model loaded!")
return compression_model
@staticmethod
def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,
checkpoint_path: tp.Union[Path, str],
device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:
"""Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.
Args:
cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.
checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.
use_ema (bool): Use EMA variant of the model instead of the actual model.
device (torch.device or str): Device on which the model is loaded.
"""
compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)
compression_model = models.builders.get_wrapped_compression_model(compression_model, cfg)
return compression_model
def evaluate_audio_reconstruction(y_pred: torch.Tensor, y: torch.Tensor, cfg: omegaconf.DictConfig) -> dict:
"""Audio reconstruction evaluation method that can be conveniently pickled."""
metrics = {}
if cfg.evaluate.metrics.visqol:
visqol = builders.get_visqol(cfg.metrics.visqol)
metrics['visqol'] = visqol(y_pred, y, cfg.sample_rate)
sisnr = builders.get_loss('sisnr', cfg)
metrics['sisnr'] = sisnr(y_pred, y)
return metrics
| audiocraft-main | audiocraft/solvers/compression.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Solvers. A Solver is a training recipe, combining the dataloaders, models,
optimizer, losses etc into a single convenient object.
"""
# flake8: noqa
from .audiogen import AudioGenSolver
from .builders import get_solver
from .base import StandardSolver
from .compression import CompressionSolver
from .musicgen import MusicGenSolver
from .diffusion import DiffusionSolver
| audiocraft-main | audiocraft/solvers/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from . import builders, musicgen
class AudioGenSolver(musicgen.MusicGenSolver):
"""Solver for AudioGen re-implementation training task.
Note that this implementation does not strictly follows
the method proposed in https://arxiv.org/abs/2209.15352
but is derived from MusicGen's training pipeline.
More information can be found in the AudioGen model card.
"""
DATASET_TYPE: builders.DatasetType = builders.DatasetType.SOUND
| audiocraft-main | audiocraft/solvers/audiogen.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from contextlib import contextmanager
from pathlib import Path
import typing as tp
import flashy
import omegaconf
import torch
from torch import nn
from .. import optim
from ..optim import fsdp
from ..utils import checkpoint
from ..utils.autocast import TorchAutocast
from ..utils.best_state import BestStateDictManager
from ..utils.deadlock import DeadlockDetect
from ..utils.profiler import Profiler
from ..utils.utils import copy_state, dict_from_config, model_hash, with_rank_rng
class StandardSolver(ABC, flashy.BaseSolver):
"""Standard solver for AudioCraft.
The standard solver implements a base training loop with the following stages:
train, valid, evaluate and generate that are expected to be all defined for
solvers in AudioCraft. It also provides a nice default management of Dora history replay,
checkpoint management across epoch, and logging configuration.
AudioCraft solvers must inherit from the StandardSolver and define the methods
associated to each stage as well as the show, build_model and build_dataloaders methods.
"""
def __init__(self, cfg: omegaconf.DictConfig):
super().__init__()
self.logger.info(f"Instantiating solver {self.__class__.__name__} for XP {self.xp.sig}")
self.logger.info(f"All XP logs are stored in {self.xp.folder}")
self.cfg = cfg
self.device = cfg.device
self.model: nn.Module
self._continue_best_source_keys = ['best_state', 'fsdp_best_state']
self._fsdp_modules: tp.List[fsdp.FSDP] = []
self._ema_sources: nn.ModuleDict = nn.ModuleDict()
self.ema: tp.Optional[optim.ModuleDictEMA] = None
self.dataloaders: tp.Dict[str, torch.utils.data.DataLoader] = dict()
self._log_updates = self.cfg.logging.get('log_updates', 10)
if self.cfg.logging.log_tensorboard:
self.init_tensorboard(**self.cfg.get('tensorboard'))
if self.cfg.logging.log_wandb and self:
self.init_wandb(**self.cfg.get('wandb'))
# keep a copy of the best performing state for stateful objects
# used for evaluation and generation stages
dtype_best: tp.Optional[torch.dtype] = None
if self.cfg.fsdp.use:
dtype_best = getattr(torch, self.cfg.fsdp.param_dtype) # type: ignore
assert isinstance(dtype_best, torch.dtype)
elif self.cfg.autocast:
dtype_best = getattr(torch, self.cfg.autocast_dtype) # type: ignore
assert isinstance(dtype_best, torch.dtype)
self.best_state: BestStateDictManager = BestStateDictManager(dtype=dtype_best)
# Hacky support for keeping a copy of the full best state in rank0.
self.fsdp_best_state: tp.Dict[str, tp.Any] = {}
self.register_stateful('best_state', 'fsdp_best_state') # register best_state object to keep it in state_dict
self._new_best_state: bool = False # should save a new checkpoint
# instantiate datasets and appropriate number of updates per epoch
self.build_dataloaders()
if self.cfg.execute_only is None:
assert 'train' in self.dataloaders, "The train dataset split must be provided."
assert 'valid' in self.dataloaders, "The valid dataset split must be provided."
self.train_updates_per_epoch = len(self.dataloaders['train']) if 'train' in self.dataloaders else 0
if self.cfg.optim.updates_per_epoch:
self.train_updates_per_epoch = self.cfg.optim.updates_per_epoch
self.total_updates = self.train_updates_per_epoch * self.cfg.optim.epochs
# instantiate model & exponential moving average on the model
self.build_model()
self.logger.info("Model hash: %s", model_hash(self.model))
assert 'model' in self.stateful.sources, \
"Please register the model to stateful with self.register_stateful('model') in build_model."
self.profiler = Profiler(self.model, **self.cfg.profiler)
self.initialize_ema()
self.register_stateful('ema')
assert self.ema is None or 'ema' in self.stateful.sources, \
"Please register the ema to stateful with self.register_stateful('ema') in build_model."
self.deadlock_detect = DeadlockDetect(**self.cfg.deadlock)
# basic statistics on the trained model
model_size = sum(p.numel() for p in self.model.parameters() if p.requires_grad) / 1e6
# one copy of grad, one copy of momentum, one copy of denominator and model weights.
# and 4 bytes for each float!
mem_usage = model_size * 4 * 4 / 1000
self.logger.info("Model size: %.2f M params", model_size)
self.logger.info("Base memory usage, with model, grad and optim: %.2f GB", mem_usage)
@property
def autocast(self):
"""Convenient autocast (or not) using the solver configuration."""
return TorchAutocast(enabled=self.cfg.autocast, device_type=self.device, dtype=self.autocast_dtype)
def _get_state_source(self, name) -> flashy.state.StateDictSource:
# Internal utility to get a state source from the solver
return self.stateful.sources[name]
@property
def best_metric_name(self) -> tp.Optional[str]:
"""Metric name used to identify the best state. This metric should be stored in the metrics
used on the stage for best state identification (most likely, `valid`). If None, then
no best state is saved.
"""
return None
def register_best_state(self, *args: str):
"""Register state sources in `BestStateDictManager` to keep their best states along with their
latest states. The best state will be used at evaluation stages instead of the latest states.
Shortcut around `BestStateDictManager.register` method. You can pass any number of
attribute, included nested attributes and those will be included into the checkpoints
and automatically restored when `BaseSolver.restore` is called.
"""
for name in args:
state_source = self._get_state_source(name)
assert name in self.stateful.sources, "Registered states in best should be registered in stateful first!"
self.best_state.register(name, state_source)
def register_ema(self, *args: str):
"""Register state sources for exponential moving average.
The registered sources are used to instantiate a ModuleDictEMA instance.
The ModuleDictEMA keeps a `nn.ModuleDict` module that is updated when self.ema.step() is called
and swapped with the original state sources with self.swap_ema_state() method.
Usage:
self.register_ema('model')
"""
assert self.ema is None, "Cannot register state source to already instantiated EMA."
for name in args:
self._ema_sources[name] = getattr(self, name)
def wrap_with_fsdp(self, model: torch.nn.Module, *args, **kwargs):
model = fsdp.wrap_with_fsdp(self.cfg.fsdp, model, *args, **kwargs)
if isinstance(model, fsdp.FSDP):
self._fsdp_modules.append(model)
return model
def update_best_state_from_stage(self, stage_name: str = 'valid'):
"""Update latest best state based on pending metrics of a given stage. This method relies
on the `BestStateDictManager.update` method to update the best state_dict with latest weights
if the registered states happen to match to the best performing setup.
"""
if self.best_metric_name is None:
# when no best metric is defined, the last state is always the best
self._new_best_state = True
self.logger.info("Updating best state with current state.")
else:
assert stage_name in self._pending_metrics, f"Metrics for stage {stage_name} not found."
assert self.best_metric_name in self._pending_metrics[stage_name], \
f"Best metric not found in {stage_name} metrics. Cannot register best state"
current_score = self._pending_metrics[stage_name][self.best_metric_name]
all_best_metric_scores = [
past_metrics[stage_name][self.best_metric_name]
for past_metrics in self.history
]
all_best_metric_scores.append(current_score)
best_score = min(all_best_metric_scores)
self._new_best_state = current_score == best_score
if self._new_best_state:
old_best = min(all_best_metric_scores[:-1] + [float('inf')])
self.logger.info(
f"New best state with {self.best_metric_name}={current_score:.3f} (was {old_best:.3f})")
if self._new_best_state:
if self.cfg.fsdp.use:
# this will give an empty state dict on all ranks but the rank 0
# which will have a copy in memory of the full model.
with fsdp.switch_to_full_state_dict(self._fsdp_modules):
for name in self.best_state.states.keys():
state_source = self._get_state_source(name)
self.best_state.update(name, state_source)
# we save to a different dict.
self.fsdp_best_state.update(self.best_state.state_dict())
# We cannot efficiently load fsdp_best_state when using FSDP,
# so we have do do a second pass, with the local shards.
for name in self.best_state.states.keys():
state_source = self._get_state_source(name)
self.best_state.update(name, state_source)
def _load_new_state_dict(self, state_dict: dict) -> dict:
old_states = {}
for name, new_state in state_dict.items():
state_source = self._get_state_source(name)
old_states[name] = copy_state(state_source.state_dict())
state_source.load_state_dict(new_state)
return old_states
@contextmanager
def swap_best_state(self):
self.logger.debug(f"Swapping to best state for: {', '.join(self.best_state.state_dict().keys())}")
old_states = self._load_new_state_dict(self.best_state.state_dict())
try:
yield
finally:
self.logger.debug("Swapping back from best to original state")
for name, old_state in old_states.items():
state_source = self._get_state_source(name)
state_source.load_state_dict(old_state)
@contextmanager
def swap_ema_state(self):
if self.ema is None:
yield
else:
ema_state_dict = self.ema.state_dict()['state']
self.logger.debug(f"Swapping to EMA state for: {', '.join(ema_state_dict.keys())}")
old_states = self._load_new_state_dict(ema_state_dict)
try:
yield
finally:
self.logger.debug("Swapping back from EMA state to original state")
for name, old_state in old_states.items():
state_source = self._get_state_source(name)
state_source.load_state_dict(old_state)
@property
def is_training(self):
return self.current_stage == 'train'
def log_model_summary(self, model: nn.Module):
"""Log model summary, architecture and size of the model."""
self.logger.info(model)
mb = sum(p.numel() for p in model.parameters()) * 4 / 2 ** 20
self.logger.info("Size: %.1f MB", mb)
@abstractmethod
def build_model(self):
"""Method to implement to initialize model."""
...
def initialize_ema(self):
"""Initialize exponential moving average with the registered sources.
EMA object is created if the optim.ema.model.decay value is non-null.
"""
from .builders import get_ema
self.ema = get_ema(self._ema_sources, self.cfg.optim.ema)
if self.ema is None:
self.logger.info('No EMA on the model.')
else:
assert self.cfg.optim.ema.updates > 0
self.logger.info(
f'Initializing EMA on the model with decay = {self.ema.decay}'
f' every {self.cfg.optim.ema.updates} updates'
)
@abstractmethod
def build_dataloaders(self):
"""Method to implement to initialize dataloaders."""
...
@abstractmethod
def show(self):
"""Method to log any information without running the job."""
...
@property
def log_updates(self):
# convenient access to log updates
return self._log_updates
def checkpoint_path(self, **kwargs):
kwargs.setdefault('use_fsdp', self.cfg.fsdp.use)
return self.folder / checkpoint.checkpoint_name(**kwargs)
def epoch_checkpoint_path(self, epoch: int, **kwargs):
kwargs.setdefault('use_fsdp', self.cfg.fsdp.use)
return self.folder / checkpoint.checkpoint_name(str(epoch), **kwargs)
def checkpoint_path_with_name(self, name: str, **kwargs):
kwargs.setdefault('use_fsdp', self.cfg.fsdp.use)
return self.folder / checkpoint.checkpoint_name(name=name, **kwargs)
def save_checkpoints(self):
"""Save checkpoint, optionally keeping a copy for a given epoch."""
is_sharded = self.cfg.fsdp.use
if not flashy.distrib.is_rank_zero() and not is_sharded:
return
self.logger.info("Model hash: %s", model_hash(self.model))
state = self.state_dict()
epoch = self.epoch - 1 # pushing metrics will increase the epoch in Flashy, so we do -1 here
# save minimal state_dict as new checkpoint every X epoch
if self.cfg.checkpoint.save_every:
if epoch % self.cfg.checkpoint.save_every == 0:
minimal_state = state
if self.cfg.checkpoint.keep_every_states is not None and len(self.cfg.checkpoint.keep_every_states) > 0:
minimal_state = {
name: source for name, source in state.items()
if name in self.cfg.checkpoint.keep_every_states
}
epoch_checkpoint_path = self.epoch_checkpoint_path(epoch)
checkpoint.save_checkpoint(minimal_state, epoch_checkpoint_path, is_sharded)
# save checkpoint as latest checkpoint
if self.cfg.checkpoint.save_last:
last_checkpoint_path = self.checkpoint_path()
checkpoint.save_checkpoint(state, last_checkpoint_path, is_sharded)
# flush any stale checkpoint to reduce disk footprint
checkpoint.flush_stale_checkpoints(self.checkpoint_path())
def load_from_pretrained(self, name: str) -> dict:
raise NotImplementedError("Solver does not provide a way to load pretrained models.")
def load_checkpoints(self, load_best: bool = False, ignore_state_keys: tp.List[str] = []) -> tp.Optional[dict]:
"""Load last checkpoint or the one specified in continue_from.
Args:
load_best (bool): Whether to load from best state dict or not.
Best state dict is always used when not loading the current xp.
ignore_state_keys (list of str): List of sources to ignore when loading the state, e.g. `optimizer`.
Returns:
state (dict, optional): The loaded state dictionary.
"""
# load checkpoints from xp folder or cfg.continue_from
is_sharded = self.cfg.fsdp.use
load_from_path: tp.Optional[Path] = None
checkpoint_source: tp.Optional[checkpoint.CheckpointSource] = None
if load_best:
self.logger.info("Trying to load state_dict from best state.")
state: tp.Optional[dict] = None
rank0_checkpoint_path = self.checkpoint_path(use_fsdp=False)
current_checkpoint_path = self.checkpoint_path()
_pretrained_prefix = '//pretrained/'
continue_pretrained = (self.cfg.continue_from or '').startswith(_pretrained_prefix)
if rank0_checkpoint_path.exists():
self.logger.info(f"Loading existing checkpoint: {current_checkpoint_path}")
load_from_path = current_checkpoint_path
checkpoint.check_sharded_checkpoint(current_checkpoint_path, rank0_checkpoint_path)
checkpoint_source = checkpoint.CheckpointSource.CURRENT_XP
elif self.cfg.continue_from and not continue_pretrained:
self.logger.info(f"Continuing from provided checkpoint: {self.cfg.continue_from}")
# we're always continuing from consolidated checkpoints: self.cfg.use_fsdp and not continue_best
load_from_path = checkpoint.resolve_checkpoint_path(self.cfg.continue_from, use_fsdp=False)
if load_from_path is None:
self.logger.error('Could not resolve the continue_from checkpoint %s', self.cfg.continue_from)
raise RuntimeError(f'Could not resolve continue_from checkpoint {self.cfg.continue_from}')
checkpoint_source = checkpoint.CheckpointSource.OTHER
if load_from_path is not None:
state = checkpoint.load_checkpoint(load_from_path, is_sharded)
elif continue_pretrained:
self.logger.info("Loading a pretrained model. Ignoring 'load_best' and 'ignore_state_keys' params.")
state = self.load_from_pretrained(self.cfg.continue_from[len(_pretrained_prefix):])
checkpoint_source = checkpoint.CheckpointSource.PRETRAINED
load_best = True
# checkpoints are not from the current xp, we only retrieve the best state
if checkpoint_source is not None and checkpoint_source != checkpoint.CheckpointSource.CURRENT_XP:
assert state is not None
self.logger.info("Checkpoint source is not the current xp: Load state_dict from best state.")
load_best = True
state = {key: state[key] for key in self._continue_best_source_keys if key in state}
# loaded checkpoints are FSDP checkpoints: we're reading the best state
# from FSDP and we drop the regular best_state
if 'fsdp_best_state' in state and state['fsdp_best_state']:
state.pop('best_state', None)
self.logger.info("... Loaded checkpoint has FSDP best state")
# FSDP is enabled in the solver, if the loaded checkpoints do not have FSDP support
# then we're initializing FSDP best state with the regular best state
elif self.cfg.fsdp.use:
if 'fsdp_best_state' not in state or not state['fsdp_best_state']:
# we swap non-FSDP checkpoints best_state to FSDP-compatible best state
state['fsdp_best_state'] = state.pop('best_state')
self.logger.info("... Loaded checkpoint does not have FSDP best state. Use regular best state")
if state is not None:
if load_best:
self.logger.info("Ignoring keys when loading best %r", ignore_state_keys)
for key in set(ignore_state_keys):
if key in state:
state.pop(key)
has_best_state = 'best_state' in state or 'fsdp_best_state' in state
assert has_best_state, ("Trying to load best state but neither 'best_state'",
" or 'fsdp_best_state' found in checkpoints.")
self.load_state_dict(state)
# for FSDP, let's make extra sure nothing bad happened with out of sync
# checkpoints across workers.
epoch = float(self.epoch)
avg_epoch = flashy.distrib.average_metrics({'epoch': epoch})['epoch']
if avg_epoch != epoch:
raise RuntimeError(
f"Inconsistent loading of checkpoints happened, our epoch is {epoch} "
f"but average of epochs is {avg_epoch}, at least one gpu must have a "
"different epoch number.")
# on load_best, properly reinitialize state_dict, best states and ema
# otherwise we load from the current xp and don't alter anything
if load_best:
self.logger.info("Loading state_dict from best state.")
if not self.cfg.fsdp.use and self.fsdp_best_state:
# loading from an FSDP checkpoint but with FSDP deactivated
self.logger.info("... Loading from FSDP best state dict.")
self.best_state.load_state_dict(self.fsdp_best_state)
# if load_best, we permanently override the regular state_dict with the best state
if self.cfg.fsdp.use:
self.logger.info("FSDP is used, loading from FSDP best state.")
with fsdp.switch_to_full_state_dict(self._fsdp_modules):
# this might be really fragile but okay for now.
self.load_state_dict(self.fsdp_best_state)
else:
# we permanently swap the stateful objects to their best state
self._load_new_state_dict(self.best_state.state_dict())
# the EMA modules should also be instantiated with best state.
# the easiest way to do so is to reinitialize a new EMA with best state loaded.
if self.ema is not None:
self.logger.info("Re-initializing EMA from best state")
self.initialize_ema()
if self.cfg.fsdp.use:
self.logger.info("Re-initializing best state after using FSDP best state.")
for name in self.best_state.states.keys():
state_source = self._get_state_source(name)
self.best_state.update(name, state_source)
return state
def restore(self, load_best: bool = False, replay_metrics: bool = False,
ignore_state_keys: tp.List[str] = []) -> bool:
"""Restore the status of a solver for a given xp.
Args:
load_best (bool): if `True`, load the best state from the checkpoint.
replay_metrics (bool): if `True`, logs all the metrics from past epochs.
ignore_state_keys (list of str): list of sources to ignore when loading the state, e.g. `optimizer`.
"""
self.logger.info("Restoring weights and history.")
restored_checkpoints = self.load_checkpoints(load_best, ignore_state_keys)
self.logger.info("Model hash: %s", model_hash(self.model))
if replay_metrics and len(self.history) > 0:
self.logger.info("Replaying past metrics...")
for epoch, stages in enumerate(self.history):
for stage_name, metrics in stages.items():
# We manually log the metrics summary to the result logger
# as we don't want to add them to the pending metrics
self.result_logger._log_summary(stage_name, metrics, step=epoch + 1, step_name='epoch',
formatter=self.get_formatter(stage_name))
return restored_checkpoints is not None
def commit(self, save_checkpoints: bool = True):
"""Commit metrics to dora and save checkpoints at the end of an epoch."""
# we override commit to introduce more complex checkpoint saving behaviors
self.history.append(self._pending_metrics) # This will increase self.epoch
if save_checkpoints:
self.save_checkpoints()
self._start_epoch()
if flashy.distrib.is_rank_zero():
self.xp.link.update_history(self.history)
def run_epoch(self):
"""Run a single epoch with all stages.
Metrics for a given stage are stored in _pending_metrics and committed by the solver afterwards.
Children solvers can extend this method with custom behavior, e.g.:
def run_epoch(self):
... # custom code
super().run_epoch()
... # custom code
"""
self.run_stage('train', self.train)
with torch.no_grad():
with self.swap_ema_state():
self.run_stage('valid', self.valid)
# the best state is updated with EMA states if available
self.update_best_state_from_stage('valid')
with self.swap_best_state():
if self.should_run_stage('evaluate'):
self.run_stage('evaluate', self.evaluate)
if self.should_run_stage('generate'):
self.run_stage('generate', with_rank_rng()(self.generate))
def run(self):
"""Training loop."""
assert len(self.state_dict()) > 0
self.restore(replay_metrics=True) # load checkpoint and replay history
self.log_hyperparams(dict_from_config(self.cfg))
for epoch in range(self.epoch, self.cfg.optim.epochs + 1):
if self.should_stop_training():
return
self.run_epoch()
# Commit will send the metrics to Dora and save checkpoints by default.
self.commit()
def should_stop_training(self) -> bool:
"""Check whether we should stop training or not."""
return self.epoch > self.cfg.optim.epochs
def should_run_stage(self, stage_name) -> bool:
"""Check whether we want to run the specified stages."""
stage_every = self.cfg[stage_name].get('every', None)
is_last_epoch = self.epoch == self.cfg.optim.epochs
is_epoch_every = (stage_every and self.epoch % stage_every == 0)
return is_last_epoch or is_epoch_every
@abstractmethod
def run_step(self, idx: int, batch: tp.Any, metrics: dict):
"""Perform one training or valid step on a given batch."""
...
def common_train_valid(self, dataset_split: str, **kwargs: tp.Any):
"""Common logic for train and valid stages."""
self.model.train(self.is_training)
loader = self.dataloaders[dataset_split]
# get a different order for distributed training, otherwise this will get ignored
if flashy.distrib.world_size() > 1 \
and isinstance(loader.sampler, torch.utils.data.distributed.DistributedSampler):
loader.sampler.set_epoch(self.epoch)
updates_per_epoch = self.train_updates_per_epoch if self.is_training else len(loader)
if self.cfg.benchmark_no_load:
self.logger.warning("Fake loading for benchmarking: re-using first batch")
batch = next(iter(loader))
loader = [batch] * updates_per_epoch # type: ignore
lp = self.log_progress(self.current_stage, loader, total=updates_per_epoch, updates=self.log_updates)
average = flashy.averager() # epoch wise average
instant_average = flashy.averager() # average between two logging
metrics: dict = {}
with self.profiler, self.deadlock_detect: # profiler will only run for the first 20 updates.
for idx, batch in enumerate(lp):
self.deadlock_detect.update('batch')
if idx >= updates_per_epoch:
break
metrics = {}
metrics = self.run_step(idx, batch, metrics)
self.deadlock_detect.update('step')
# run EMA step
if self.ema is not None and self.is_training and (idx + 1) % self.cfg.optim.ema.updates == 0:
self.logger.debug("EMA model step")
self.ema.step()
self.deadlock_detect.update('ema')
self.profiler.step()
instant_metrics = instant_average(metrics)
if lp.update(**instant_metrics):
instant_average = flashy.averager() # reset averager between two logging
metrics = average(metrics) # epoch wise average
self.deadlock_detect.update('end_batch')
metrics = flashy.distrib.average_metrics(metrics, updates_per_epoch)
return metrics
def train(self):
"""Train stage."""
return self.common_train_valid('train')
def valid(self):
"""Valid stage."""
return self.common_train_valid('valid')
@abstractmethod
def evaluate(self):
"""Evaluate stage."""
...
@abstractmethod
def generate(self):
"""Generate stage."""
...
def run_one_stage(self, stage_name: str):
"""Run only the specified stage.
This method is useful to only generate samples from a trained experiment
or rerun the validation or evaluation stages.
"""
fn = {
'generate': with_rank_rng()(self.generate),
'evaluate': self.evaluate,
'valid': self.valid,
}
if stage_name not in fn:
raise ValueError(f'Trying to run stage {stage_name} is not supported.')
assert len(self.state_dict()) > 0
self._start_epoch()
with torch.no_grad(), self.swap_best_state():
self.run_stage(stage_name, fn[stage_name])
if not self.cfg.execute_inplace:
self.commit(save_checkpoints=False)
@staticmethod
def get_eval_solver_from_sig(sig: str, dtype: tp.Optional[str] = None,
device: tp.Optional[str] = None, autocast: bool = True,
batch_size: tp.Optional[int] = None,
override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None,
**kwargs):
"""Mostly a convenience function around audiocraft.train.get_solver_from_sig,
populating all the proper param, deactivating EMA, FSDP, loading the best state,
basically all you need to get a solver ready to "play" with in single GPU mode
and with minimal memory overhead.
Args:
sig (str): signature to load.
dtype (str or None): potential dtype, as a string, i.e. 'float16'.
device (str or None): potential device, as a string, i.e. 'cuda'.
override_cfg (dict or omegaconf.DictConfig or None): potential device, as a string, i.e. 'cuda'.
"""
from audiocraft import train
our_override_cfg: tp.Dict[str, tp.Any] = {'optim': {'ema': {'use': False}}}
our_override_cfg['autocast'] = autocast
if dtype is not None:
our_override_cfg['dtype'] = dtype
if device is not None:
our_override_cfg['device'] = device
if batch_size is not None:
our_override_cfg['dataset'] = {'batch_size': batch_size}
if override_cfg is None:
override_cfg = {}
override_cfg = omegaconf.OmegaConf.merge(
omegaconf.DictConfig(override_cfg), omegaconf.DictConfig(our_override_cfg)) # type: ignore
solver = train.get_solver_from_sig(
sig, override_cfg=override_cfg,
load_best=True, disable_fsdp=True,
ignore_state_keys=['optimizer', 'ema'], **kwargs)
solver.model.eval()
return solver
| audiocraft-main | audiocraft/solvers/base.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import logging
from pathlib import Path
import re
import typing as tp
import flashy
import torch
from ..environment import AudioCraftEnvironment
logger = logging.getLogger(__name__)
class CheckpointSource(Enum):
CURRENT_XP = "current_xp"
PRETRAINED = "pretrained"
OTHER = "other"
def checkpoint_name(name: tp.Optional[str] = None, rank: tp.Optional[int] = None, use_fsdp: bool = False) -> str:
"""Checkpoint name formatted for all use in AudioCraft codebase and has the following format:
`checkpoint_<name>.th(.<rank>)`. By convention, name is expected to be empty for last checkpoint,
'best' for the best checkpoint or the epoch number.
Args:
name (str, optional): Name suffix for the checkpoint file stem.
rank (optional, int): Rank for distributed processing, retrieved with flashy if not provided.
use_fsdp (bool): Whether the calling solver relies on FSDP.
Returns:
str: The checkpoint name.
"""
suffix = ''
if rank is None:
rank = flashy.distrib.rank()
if rank > 0 and use_fsdp:
suffix = '.' + str(rank)
name_part = ''
if name is not None:
name_part = f'_{name}'
return f'checkpoint{name_part}.th{suffix}'
def is_sharded_checkpoint(path: Path) -> bool:
"""Whether the checkpoint at the given path corresponds to a sharded checkpoint across rank."""
return re.search(r'\.th\.\d+$', path.name) is not None
def resolve_checkpoint_path(sig_or_path: tp.Union[Path, str], name: tp.Optional[str] = None,
use_fsdp: bool = False) -> tp.Optional[Path]:
"""Resolve a given checkpoint path for a provided dora sig or path.
Args:
sig_or_path (Path or str): Checkpoint path or dora signature.
name (str, optional): Name suffix for the checkpoint file stem.
rank (optional, int): Rank for distributed processing, retrieved with flashy if not provided.
use_fsdp (bool): Whether the calling solver relies on FSDP.
Returns:
Path, optional: Resolved checkpoint path, if it exists.
"""
from audiocraft import train
xps_root = train.main.dora.dir / 'xps'
sig_or_path = str(sig_or_path)
if sig_or_path.startswith('//sig/'):
sig = sig_or_path[len('//sig/'):]
path = xps_root / sig
else:
path = Path(sig_or_path)
path = AudioCraftEnvironment.resolve_reference_path(path)
if path.is_dir():
path = path / checkpoint_name(name, use_fsdp=use_fsdp)
if path.exists():
return path
else:
return None
def load_checkpoint(checkpoint_path: Path, is_sharded: bool = False) -> tp.Any:
"""Load state from checkpoints at the specified checkpoint path."""
if is_sharded:
rank0_checkpoint_path = checkpoint_path.parent / checkpoint_name(use_fsdp=False)
if rank0_checkpoint_path.exists():
check_sharded_checkpoint(checkpoint_path, rank0_checkpoint_path)
state = torch.load(checkpoint_path, 'cpu')
logger.info("Checkpoint loaded from %s", checkpoint_path)
return state
def save_checkpoint(state: tp.Any, checkpoint_path: Path, is_sharded: bool = False) -> None:
"""Save state to disk to the specified checkpoint_path."""
_safe_save_checkpoint(state, checkpoint_path, is_sharded)
logger.info("Checkpoint saved to %s", checkpoint_path)
def flush_stale_checkpoints(checkpoint_path: Path, keep_last: tp.Optional[int] = None) -> None:
"""Flush checkpoints to only keep last N checkpoints."""
if keep_last is None or keep_last <= 0:
return
checkpoint_dir = checkpoint_path.parent
suffix = ''
if flashy.distrib.rank() > 0:
suffix = f'.{flashy.distrib.rank()}'
checkpoint_files_with_epoch = []
for path in Path(checkpoint_dir).glob(f'checkpoint_*.th{suffix}'):
epoch_part = path.name.split('.', 1)[0].split('_', 1)[1]
if epoch_part.isdigit():
checkpoint_files_with_epoch.append((path, int(epoch_part)))
checkpoint_files = [path for path, _ in list(sorted(checkpoint_files_with_epoch, key=lambda t: t[1]))]
total_to_flush = max(0, len(checkpoint_files) - keep_last)
files_to_flush = checkpoint_files[:total_to_flush]
for path in files_to_flush:
logger.debug("Removing checkpoint: %s", str(path))
path.unlink(missing_ok=True)
def check_sharded_checkpoint(checkpoint_path: Path, rank0_checkpoint_path: Path) -> None:
"""Check sharded checkpoint state, ensuring the checkpoints are not corrupted."""
# Finish the work of a previous run that got interrupted while dumping.
old_path = Path(str(checkpoint_path) + '.old')
if old_path.exists():
raise RuntimeError(
f"Old checkpoint {old_path} from previous version of this code exist, cannot safely proceed.")
token = Path(str(rank0_checkpoint_path) + '.tmp.done')
tmp_path = Path(str(checkpoint_path) + '.tmp')
if token.exists():
if tmp_path.exists():
tmp_path.rename(checkpoint_path)
flashy.distrib.barrier()
if flashy.distrib.is_rank_zero() and token.exists():
token.unlink()
def _safe_save_checkpoint(state: tp.Any, checkpoint_path: Path, is_sharded: bool = False) -> None:
"""Save checkpoints in a safe manner even with when sharded checkpoints across nodes."""
def _barrier_if_sharded():
if is_sharded:
flashy.distrib.barrier()
if flashy.distrib.is_rank_zero():
token = Path(str(checkpoint_path) + '.tmp.done')
if token.exists():
token.unlink()
_barrier_if_sharded()
with flashy.utils.write_and_rename(checkpoint_path) as f:
torch.save(state, f)
_barrier_if_sharded()
if flashy.distrib.is_rank_zero():
token.touch()
_barrier_if_sharded()
_barrier_if_sharded()
if flashy.distrib.rank() == 0:
token.unlink()
| audiocraft-main | audiocraft/utils/checkpoint.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from queue import Queue, Empty
import signal
import sys
import threading
import traceback
logger = logging.getLogger(__name__)
class DeadlockDetect:
def __init__(self, use: bool = False, timeout: float = 120.):
self.use = use
self.timeout = timeout
self._queue: Queue = Queue()
def update(self, stage: str):
if self.use:
self._queue.put(stage)
def __enter__(self):
if self.use:
self._thread = threading.Thread(target=self._detector_thread)
self._thread.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.use:
self._queue.put(None)
self._thread.join()
def _detector_thread(self):
logger.debug("Deadlock detector started")
last_stage = "init"
while True:
try:
stage = self._queue.get(timeout=self.timeout)
except Empty:
break
if stage is None:
logger.debug("Exiting deadlock detector thread")
return
else:
last_stage = stage
logger.error("Deadlock detector timed out, last stage was %s", last_stage)
for th in threading.enumerate():
print(th, file=sys.stderr)
traceback.print_stack(sys._current_frames()[th.ident])
print(file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
os.kill(os.getpid(), signal.SIGKILL)
| audiocraft-main | audiocraft/utils/deadlock.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy functions used at the time of the first release, kept for referencd.
"""
from pathlib import Path
import typing as tp
from omegaconf import OmegaConf, DictConfig
import torch
def _clean_lm_cfg(cfg: DictConfig):
OmegaConf.set_struct(cfg, False)
# This used to be set automatically in the LM solver, need a more robust solution
# for the future.
cfg['transformer_lm']['card'] = 2048
cfg['transformer_lm']['n_q'] = 4
# Experimental params no longer supported.
bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters',
'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop']
for name in bad_params:
del cfg['transformer_lm'][name]
OmegaConf.set_struct(cfg, True)
return cfg
def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]):
sig = Path(checkpoint_path).parent.name
assert len(sig) == 8, "Not a valid Dora signature"
pkg = torch.load(checkpoint_path, 'cpu')
new_pkg = {
'best_state': pkg['ema']['state']['model'],
'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']),
}
out_file = Path(out_folder) / f'{sig}.th'
torch.save(new_pkg, out_file)
return out_file
def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]):
sig = Path(checkpoint_path).parent.name
assert len(sig) == 8, "Not a valid Dora signature"
pkg = torch.load(checkpoint_path, 'cpu')
new_pkg = {
'best_state': pkg['fsdp_best_state']['model'],
'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg']))
}
out_file = Path(out_folder) / f'{sig}.th'
torch.save(new_pkg, out_file)
return out_file
| audiocraft-main | audiocraft/utils/export_legacy.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
class TorchAutocast:
"""TorchAutocast utility class.
Allows you to enable and disable autocast. This is specially useful
when dealing with different architectures and clusters with different
levels of support.
Args:
enabled (bool): Whether to enable torch.autocast or not.
args: Additional args for torch.autocast.
kwargs: Additional kwargs for torch.autocast
"""
def __init__(self, enabled: bool, *args, **kwargs):
self.autocast = torch.autocast(*args, **kwargs) if enabled else None
def __enter__(self):
if self.autocast is None:
return
try:
self.autocast.__enter__()
except RuntimeError:
device = self.autocast.device
dtype = self.autocast.fast_dtype
raise RuntimeError(
f"There was an error autocasting with dtype={dtype} device={device}\n"
"If you are on the FAIR Cluster, you might need to use autocast_dtype=float16"
)
def __exit__(self, *args, **kwargs):
if self.autocast is None:
return
self.autocast.__exit__(*args, **kwargs)
| audiocraft-main | audiocraft/utils/autocast.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import logging
import typing as tp
import flashy
import torch
from ..optim import ModuleDictEMA
from .utils import copy_state
logger = logging.getLogger(__name__)
class BestStateDictManager(flashy.state.StateDictSource):
"""BestStateDictManager maintains a copy of best state_dict() for registered sources.
BestStateDictManager has two main attributes:
states (dict): State dict of the registered StateDictSource.
param_ids (dict): Dict of parameter ids for registered states from ModuleDictEMA and other sources.
When registering new sources, the BestStateDictManager will ensure two conflicting sources between
ModuleDictEMA and original modules are not both registered as it would otherwise create ambiguity about
what to consider for best state.
Args:
device (torch.device or str): Device on which we keep the copy.
dtype (torch.dtype): Data type for the state parameters.
"""
def __init__(self, device: tp.Union[torch.device, str] = 'cpu',
dtype: tp.Optional[torch.dtype] = None):
self.device = device
self.states: dict = {}
self.param_ids: dict = defaultdict(dict)
self.dtype = dtype
def _get_parameter_ids(self, state_dict):
return {id(p): name for name, p in state_dict.items() if isinstance(p, torch.Tensor)}
def _validate_no_parameter_ids_overlap(self, name: str, param_ids: dict):
for registered_name, registered_param_ids in self.param_ids.items():
if registered_name != name:
overlap = set.intersection(registered_param_ids.keys(), param_ids.keys())
assert len(overlap) == 0, f"Found {len(overlap)} / {len(param_ids.keys())} overlapping parameters"
f" in {name} and already registered {registered_name}: {' '.join(overlap)}"
def update(self, name: str, source: flashy.state.StateDictSource):
if name not in self.states:
raise ValueError(f"{name} missing from registered states.")
self.states[name] = copy_state(source.state_dict(), device=self.device, dtype=self.dtype)
def register(self, name: str, source: flashy.state.StateDictSource):
if name in self.states:
raise ValueError(f"{name} already present in states.")
# Registering parameter ids for EMA and non-EMA states allows us to check that
# there is no overlap that would create ambiguity about how to handle the best state
param_ids = self._get_parameter_ids(source.state_dict())
if isinstance(source, ModuleDictEMA):
logger.debug(f"Registering to best state: ModuleDictEMA '{name}' with {len(param_ids)} params")
self._validate_no_parameter_ids_overlap(name, param_ids)
self.param_ids[name] = param_ids
else:
logger.debug(f"Registering to best state: StateDictSource '{name}' with {len(param_ids)} params")
self._validate_no_parameter_ids_overlap('base', param_ids)
self.param_ids['base'].update(param_ids)
# Register state
self.states[name] = copy_state(source.state_dict(), device=self.device, dtype=self.dtype)
def state_dict(self) -> flashy.state.StateDict:
return self.states
def load_state_dict(self, state: flashy.state.StateDict):
for name, sub_state in state.items():
for k, v in sub_state.items():
self.states[name][k].copy_(v)
| audiocraft-main | audiocraft/utils/best_state.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from concurrent.futures import ThreadPoolExecutor
from collections import deque
from functools import partial
from hashlib import sha1
import logging
from pathlib import Path
import sys
import typing as tp
import zipfile
import flashy
import torch
logger = logging.getLogger(__name__)
def get_full_embed(full_embed: torch.Tensor, x: tp.Any, idx: int, device: tp.Union[str, torch.device]) -> torch.Tensor:
"""Utility function for the EmbeddingCache, returning the full embedding without any chunking.
This method can be used in case there is no need in extracting a chunk of the full embedding
read from the cache.
Args:
full_embed (torch.Tensor): The full embedding.
x (any): Batch object from which the full embedding is derived.
idx (torch.Tensor): Index of object to consider in the batch object.
Returns:
full_embed (torch.Tensor): The full embedding
"""
return full_embed.to(device)
class EmbeddingCache:
"""Cache around embeddings computation for faster execution.
The EmbeddingCache is storing pre-computed embeddings on disk and provides a simple API
to retrieve the pre-computed embeddings on full inputs and extract only a given chunk
using a user-provided function. When the cache is warm (all embeddings are pre-computed),
the EmbeddingCache allows for faster training as it removes the need of computing the embeddings.
Additionally, it provides in-memory cache around the loaded embeddings to limit IO footprint
and synchronization points in the forward calls.
Args:
cache_path (Path): Path to folder where all pre-computed embeddings are saved on disk.
device (str or torch.device): Device on which the embedding is returned.
compute_embed_fn (callable[[Path, any, int], torch.Tensor], optional): Function to compute
the embedding from a given object and path. This user provided function can compute the
embedding from the provided object or using the provided path as entry point. The last parameter
specify the index corresponding to the current embedding in the object that can represent batch metadata.
extract_embed_fn (callable[[torch.Tensor, any, int], torch.Tensor], optional): Function to extract
the desired embedding chunk from the full embedding loaded from the cache. The last parameter
specify the index corresponding to the current embedding in the object that can represent batch metadata.
If not specified, will return the full embedding unmodified.
"""
def __init__(self, cache_path: tp.Union[Path], device: tp.Union[str, torch.device],
compute_embed_fn: tp.Callable[[Path, tp.Any, int], torch.Tensor],
extract_embed_fn: tp.Optional[tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]] = None):
self.cache_path = Path(cache_path)
self.device = device
self._compute_embed_fn = compute_embed_fn
self._extract_embed_fn: tp.Callable[[torch.Tensor, tp.Any, int], torch.Tensor]
if extract_embed_fn is not None:
self._extract_embed_fn = extract_embed_fn
else:
self._extract_embed_fn = partial(get_full_embed, device=device)
if self.cache_path is not None:
self.cache_path.mkdir(exist_ok=True, parents=True)
logger.info(f"Cache instantiated at: {self.cache_path}")
self.pool = ThreadPoolExecutor(8)
self.pool.__enter__()
self._current_batch_cache: dict = {}
self._memory_cache: dict = {}
def _get_cache_path(self, path: tp.Union[Path, str]):
"""Get cache path for the given file path."""
sig = sha1(str(path).encode()).hexdigest()
return self.cache_path / sig
@staticmethod
def _get_full_embed_from_cache(cache: Path):
"""Loads full pre-computed embedding from the cache."""
try:
embed = torch.load(cache, 'cpu')
except Exception as exc:
logger.error("Error loading %s: %r", cache, exc)
embed = None
return embed
def get_embed_from_cache(self, paths: tp.List[Path], x: tp.Any) -> torch.Tensor:
"""Get embedding from cache, computing and storing it to cache if not already cached.
The EmbeddingCache first tries to load the embedding from the in-memory cache
containing the pre-computed chunks populated through `populate_embed_cache`.
If not found, the full embedding is computed and stored on disk to be later accessed
to populate the in-memory cache, and the desired embedding chunk is extracted and returned.
Args:
paths (list[Path or str]): List of paths from where the embeddings can be loaded.
x (any): Object from which the embedding is extracted.
"""
embeds = []
for idx, path in enumerate(paths):
cache = self._get_cache_path(path)
if cache in self._current_batch_cache:
embed = self._current_batch_cache[cache]
else:
full_embed = self._compute_embed_fn(path, x, idx)
try:
with flashy.utils.write_and_rename(cache, pid=True) as f:
torch.save(full_embed.cpu(), f)
except Exception as exc:
logger.error('Error saving embed %s (%s): %r', cache, full_embed.shape, exc)
else:
logger.info('New embed cache saved: %s (%s)', cache, full_embed.shape)
embed = self._extract_embed_fn(full_embed, x, idx)
embeds.append(embed)
embed = torch.stack(embeds, dim=0)
return embed
def populate_embed_cache(self, paths: tp.List[Path], x: tp.Any) -> None:
"""Populate in-memory caches for embeddings reading from the embeddings stored on disk.
The in-memory caches consist in a cache for the full embedding and another cache for the
final embedding chunk. Such caches are used to limit the IO access when computing the actual embeddings
and reduce the IO footprint and synchronization points during forward passes.
Args:
paths (list[Path]): List of paths from where the embeddings can be loaded.
x (any): Object from which the embedding is extracted.
"""
self._current_batch_cache.clear()
if self.cache_path is not None:
futures: list = []
for path in paths:
assert path is not None, "Path is required for computation from cache"
cache = self._get_cache_path(path)
if cache in self._memory_cache or not cache.exists():
futures.append(None)
else:
futures.append(self.pool.submit(EmbeddingCache._get_full_embed_from_cache, cache))
for idx, (path, future) in enumerate(zip(paths, futures)):
assert path is not None
cache = self._get_cache_path(path)
full_embed = None
if future is None:
if cache in self._memory_cache:
full_embed = self._memory_cache[cache]
else:
full_embed = future.result()
if full_embed is not None:
self._memory_cache[cache] = full_embed
full_embed = full_embed.to(self.device)
if full_embed is not None:
embed = self._extract_embed_fn(full_embed, x, idx)
self._current_batch_cache[cache] = embed
class CachedBatchWriter:
"""Write pre computed caches for mini batches. This can
make loading a lot more efficient depending on your filesystem.
Args:
cache_folder (Path): folder in which the cached minibatches
will be stored.
Inside cache folder, the structure is the following:
`epoch_number / update_number.zip`
And the zip file contains one entry per batch item.
It is possible to use the cache with a batch size smaller than
created with but obviously not larger. Make sure to call the
`start_epoch(epoch)` method for indicating changes of epochs.
See the grid `audiocraft/grids/musicgen/musicgen_warmup_cache.py`
for an example of how to warmup the cache.
"""
def __init__(self, cache_folder: Path):
self.cache_folder = cache_folder
self._current_epoch: tp.Optional[int] = None
self._current_index = 0
def start_epoch(self, epoch: int):
"""Call at the beginning of each epoch.
"""
self._current_epoch = epoch
self._current_index = 0
self._zip_path.parent.mkdir(exist_ok=True, parents=True)
@staticmethod
def _get_zip_path(cache_folder: Path, epoch: int, index: int):
return cache_folder / f"{epoch:05d}" / f"{index:06d}.zip"
@property
def _zip_path(self):
assert self._current_epoch is not None
return CachedBatchWriter._get_zip_path(self.cache_folder, self._current_epoch, self._current_index)
def save(self, *content):
"""Save one mini batch. This function is distributed-aware
and will automatically merge all the items from the different
workers.
"""
all_contents = []
for rank in range(flashy.distrib.world_size()):
their_content = flashy.distrib.broadcast_object(content, src=rank)
all_contents.append(their_content)
if flashy.distrib.is_rank_zero():
idx = 0
with flashy.utils.write_and_rename(self._zip_path) as tmp:
with zipfile.ZipFile(tmp, 'w') as zf:
for content in all_contents:
for vals in zip(*content):
with zf.open(f'{idx}', 'w') as f: # type: ignore
torch.save(vals, f)
idx += 1
flashy.distrib.barrier()
self._current_index += 1
class CachedBatchLoader:
"""Loader for cached mini-batches dumped with `CachedBatchWriter`.
Args:
cache_folder (Path): folder in which the cached minibatches are stored.
batch_size (int): batch size (per GPU) expected.
num_workers (int): number of workers to use for loading.
min_length (int): minimum expected length for each epoch. If some
mini-batches are missing, and error is raised.
This is iterable just like a regular DataLoader.
"""
def __init__(self, cache_folder: Path, batch_size: int,
num_workers: int = 10, min_length: int = 1):
self.cache_folder = cache_folder
self.batch_size = batch_size
self.num_workers = num_workers
self.min_length = min_length
self._current_epoch: tp.Optional[int] = None
self.sampler = None # for compatibility with the regular DataLoader
def __len__(self):
path = CachedBatchWriter._get_zip_path(self.cache_folder, self._current_epoch or 0, 0).parent
return len([p for p in path.iterdir() if p.suffix == ".zip"])
def start_epoch(self, epoch: int):
"""Call at the beginning of each epoch.
"""
self._current_epoch = epoch
def _zip_path(self, index: int):
assert self._current_epoch is not None
return CachedBatchWriter._get_zip_path(self.cache_folder, self._current_epoch, index)
def _load_one(self, index: int):
zip_path = self._zip_path(index)
if not zip_path.exists():
if index < self.min_length:
raise RuntimeError(f"Cache should have at least {self.min_length} batches, but {index} doesn't exist")
return None
mode = "rb" if sys.version_info >= (3, 9) else "r"
try:
with zipfile.ZipFile(zip_path, 'r') as zf:
rank = flashy.distrib.rank()
world_size = flashy.distrib.world_size()
root = zipfile.Path(zf)
items = list(root.iterdir())
total_batch_size = self.batch_size * world_size
if len(items) < total_batch_size:
raise RuntimeError(
f"The cache can handle a max batch size of {len(items)}, "
f"but {total_batch_size} is needed.")
start = rank * self.batch_size
items = items[start: start + self.batch_size]
assert len(items) == self.batch_size
entries = []
entries = [torch.load(item.open(mode), 'cpu') for item in items] # type: ignore
transposed = zip(*entries)
out = []
for part in transposed:
assert len(part) > 0
if isinstance(part[0], torch.Tensor):
out.append(torch.stack(part))
else:
out.append(part)
return out
except Exception:
logger.error("Error when reading zip path %s", zip_path)
raise
def __iter__(self):
"""This will yields tuples, exactly as provided to the
`CachedBatchWriter.save` method.
"""
pool = ThreadPoolExecutor(self.num_workers)
next_index = 0
queue = deque()
def _get_next():
nonlocal next_index
r = queue.popleft().result()
if r is None:
return None
else:
queue.append(pool.submit(self._load_one, next_index))
next_index += 1
return r
with pool:
# fill the buffer of fetching jobs.
for _ in range(2 * self.num_workers):
queue.append(pool.submit(self._load_one, next_index))
next_index += 1
while True:
batch = _get_next()
if batch is None:
return
yield batch
| audiocraft-main | audiocraft/utils/cache.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities."""
| audiocraft-main | audiocraft/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility to export a training checkpoint to a lightweight release checkpoint.
"""
from pathlib import Path
import typing as tp
from omegaconf import OmegaConf
import torch
from audiocraft import __version__
def export_encodec(checkpoint_path: tp.Union[Path, str], out_file: tp.Union[Path, str]):
"""Export only the best state from the given EnCodec checkpoint. This
should be used if you trained your own EnCodec model.
"""
pkg = torch.load(checkpoint_path, 'cpu')
new_pkg = {
'best_state': pkg['best_state']['model'],
'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']),
'version': __version__,
'exported': True,
}
Path(out_file).parent.mkdir(exist_ok=True, parents=True)
torch.save(new_pkg, out_file)
return out_file
def export_pretrained_compression_model(pretrained_encodec: str, out_file: tp.Union[Path, str]):
"""Export a compression model (potentially EnCodec) from a pretrained model.
This is required for packaging the audio tokenizer along a MusicGen or AudioGen model.
Do not include the //pretrained/ prefix. For instance if you trained a model
with `facebook/encodec_32khz`, just put that as a name. Same for `dac_44khz`.
In that case, this will not actually include a copy of the model, simply the reference
to the model used.
"""
if Path(pretrained_encodec).exists():
pkg = torch.load(pretrained_encodec)
assert 'best_state' in pkg
assert 'xp.cfg' in pkg
assert 'version' in pkg
assert 'exported' in pkg
else:
pkg = {
'pretrained': pretrained_encodec,
'exported': True,
'version': __version__,
}
Path(out_file).parent.mkdir(exist_ok=True, parents=True)
torch.save(pkg, out_file)
def export_lm(checkpoint_path: tp.Union[Path, str], out_file: tp.Union[Path, str]):
"""Export only the best state from the given MusicGen or AudioGen checkpoint.
"""
pkg = torch.load(checkpoint_path, 'cpu')
if pkg['fsdp_best_state']:
best_state = pkg['fsdp_best_state']['model']
else:
assert pkg['best_state']
best_state = pkg['best_state']['model']
new_pkg = {
'best_state': best_state,
'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']),
'version': __version__,
'exported': True,
}
Path(out_file).parent.mkdir(exist_ok=True, parents=True)
torch.save(new_pkg, out_file)
return out_file
| audiocraft-main | audiocraft/utils/export.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from concurrent.futures import ProcessPoolExecutor
from contextlib import contextmanager
from functools import wraps, lru_cache
import hashlib
import json
import logging
from pathlib import Path
import typing as tp
import flashy
import flashy.distrib
import omegaconf
import torch
from torch.nn.utils.rnn import pad_sequence
logger = logging.getLogger(__name__)
def model_hash(model: torch.nn.Module) -> str:
"""Return a model hash. This should allow us to track regressions in model init
from the logs of past experiments.
"""
hasher = hashlib.sha1()
for p in model.parameters():
hasher.update(p.data.cpu().numpy().tobytes())
return hasher.hexdigest()
def dict_from_config(cfg: omegaconf.DictConfig) -> dict:
"""Convenience function to map an omegaconf configuration to a dictionary.
Args:
cfg (omegaconf.DictConfig): Original configuration to map to dict.
Returns:
dict: Config as dictionary object.
"""
dct = omegaconf.OmegaConf.to_container(cfg, resolve=True)
assert isinstance(dct, dict)
return dct
def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset:
if max_samples >= len(dataset):
return dataset
generator = torch.Generator().manual_seed(seed)
perm = torch.randperm(len(dataset), generator=generator)
return torch.utils.data.Subset(dataset, perm[:max_samples].tolist())
def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int,
num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader:
"""Convenience function to load dataset into a dataloader with optional subset sampling.
Args:
dataset: Dataset to load.
num_samples (Optional[int]): Number of samples to limit subset size.
batch_size (int): Batch size.
num_workers (int): Number of workers for data loading.
seed (int): Random seed.
"""
if num_samples is not None:
dataset = random_subset(dataset, num_samples, seed)
dataloader = flashy.distrib.loader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
**kwargs
)
return dataloader
def get_dataset_from_loader(dataloader):
dataset = dataloader.dataset
if isinstance(dataset, torch.utils.data.Subset):
return dataset.dataset
else:
return dataset
def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
"""torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension.
Args:
input (torch.Tensor): The input tensor containing probabilities.
num_samples (int): Number of samples to draw.
replacement (bool): Whether to draw with replacement or not.
Keywords args:
generator (torch.Generator): A pseudorandom number generator for sampling.
Returns:
torch.Tensor: Last dimension contains num_samples indices
sampled from the multinomial probability distribution
located in the last dimension of tensor input.
"""
input_ = input.reshape(-1, input.shape[-1])
output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator)
output = output_.reshape(*list(input.shape[:-1]), -1)
return output
def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
"""Sample next token from top K values along the last dimension of the input probs tensor.
Args:
probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
k (int): The k in “top-k”.
Returns:
torch.Tensor: Sampled tokens.
"""
top_k_value, _ = torch.topk(probs, k, dim=-1)
min_value_top_k = top_k_value[..., [-1]]
probs *= (probs >= min_value_top_k).float()
probs.div_(probs.sum(dim=-1, keepdim=True))
next_token = multinomial(probs, num_samples=1)
return next_token
def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
"""Sample next token from top P probabilities along the last dimension of the input probs tensor.
Args:
probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
p (int): The p in “top-p”.
Returns:
torch.Tensor: Sampled tokens.
"""
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort *= (~mask).float()
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = multinomial(probs_sort, num_samples=1)
next_token = torch.gather(probs_idx, -1, next_token)
return next_token
class DummyPoolExecutor:
"""Dummy pool executor to use when we actually have only 1 worker.
(e.g. instead of ProcessPoolExecutor).
"""
class DummyResult:
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def result(self):
return self.func(*self.args, **self.kwargs)
def __init__(self, workers, mp_context=None):
pass
def submit(self, func, *args, **kwargs):
return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
return
def get_pool_executor(num_workers: int, mp_context=None):
return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
"""Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
Args:
lengths (torch.Tensor): tensor with lengths
max_len (int): can set the max length manually. Defaults to None.
Returns:
torch.Tensor: mask with 0s where there is pad tokens else 1s
"""
assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
final_length = lengths.max().item() if not max_len else max_len
final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor
return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
def hash_trick(word: str, vocab_size: int) -> int:
"""Hash trick to pair each word with an index
Args:
word (str): word we wish to convert to an index
vocab_size (int): size of the vocabulary
Returns:
int: index of the word in the embedding LUT
"""
hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
return hash % vocab_size
def with_rank_rng(base_seed: int = 1234):
"""Decorator for a function so that the function will use a Random Number Generator
whose state depend on the GPU rank. The original RNG state is restored upon returning.
Args:
base_seed (int): Random seed.
"""
def _decorator(fun: tp.Callable):
@wraps(fun)
def _decorated(*args, **kwargs):
state = torch.get_rng_state()
seed = base_seed ^ flashy.distrib.rank()
torch.manual_seed(seed)
logger.debug('Rank dependent seed set to %d', seed)
try:
return fun(*args, **kwargs)
finally:
torch.set_rng_state(state)
logger.debug('RNG state restored.')
return _decorated
return _decorator
def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
"""Get a list of tensors and collate them to a single tensor. according to the following logic:
- `dim` specifies the time dimension which will be stacked and padded.
- The output will contain 1 new dimension (dimension index 0) which will be the size of
of the original list.
Args:
tensors (tp.List[torch.Tensor]): List of tensors to collate.
dim (int): Dimension which will be stacked and padded.
Returns:
tp.Tuple[torch.Tensor, torch.Tensor]:
torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
(dimension index 0) which will be the size of the original list.
torch.Tensor: Tensor containing length of original tensor sizes (without padding).
"""
tensors = [x.transpose(0, dim) for x in tensors]
lens = torch.LongTensor([len(x) for x in tensors])
padded_tensors = pad_sequence(tensors)
padded_tensors = padded_tensors.transpose(0, 1)
padded_tensors = padded_tensors.transpose(1, dim + 1)
return padded_tensors, lens
# TODO: Move to flashy?
def copy_state(state: tp.Any, device: tp.Union[torch.device, str] = 'cpu',
dtype: tp.Optional[torch.dtype] = None) -> tp.Any:
if isinstance(state, torch.Tensor):
if dtype is None or not state.is_floating_point():
dtype = state.dtype
return state.detach().to(device=device, dtype=dtype, copy=True)
elif isinstance(state, dict):
return {k: copy_state(v, device, dtype) for k, v in state.items()}
elif isinstance(state, list):
return [copy_state(v, device, dtype) for v in state]
# TODO: Move to flashy?
@contextmanager
def swap_state(model, state, **kwargs):
old_state = copy_state(model.state_dict())
model.load_state_dict(state, **kwargs)
try:
yield
finally:
model.load_state_dict(old_state)
@lru_cache(None)
def warn_once(logger, msg):
"""Warn about a given message only once."""
logger.warning(msg)
def is_jsonable(x: tp.Any):
"""Check if an object can be serialized into a json:"""
try:
json.dumps(x)
return True
except (TypeError, OverflowError):
return False
def load_clap_state_dict(clap_model, path: tp.Union[str, Path]):
"""Wrapper around state dict loading of CLAP model
addressing compatibility issues between CLAP and AudioCraft
HuggingFace transformer version.
See: https://github.com/LAION-AI/CLAP/issues/118
"""
from clap_module.factory import load_state_dict # type: ignore
pkg = load_state_dict(path)
pkg.pop('text_branch.embeddings.position_ids', None)
clap_model.model.load_state_dict(pkg)
| audiocraft-main | audiocraft/utils/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility functions for SLURM configuration and cluster settings.
"""
from enum import Enum
import os
import socket
import typing as tp
import omegaconf
class ClusterType(Enum):
AWS = "aws"
FAIR = "fair"
RSC = "rsc"
LOCAL_DARWIN = "darwin"
DEFAULT = "default" # used for any other cluster.
def _guess_cluster_type() -> ClusterType:
uname = os.uname()
fqdn = socket.getfqdn()
if uname.sysname == "Linux" and (uname.release.endswith("-aws") or ".ec2" in fqdn):
return ClusterType.AWS
if fqdn.endswith(".fair"):
return ClusterType.FAIR
if fqdn.endswith(".facebook.com"):
return ClusterType.RSC
if uname.sysname == "Darwin":
return ClusterType.LOCAL_DARWIN
return ClusterType.DEFAULT
def get_cluster_type(
cluster_type: tp.Optional[ClusterType] = None,
) -> tp.Optional[ClusterType]:
if cluster_type is None:
return _guess_cluster_type()
return cluster_type
def get_slurm_parameters(
cfg: omegaconf.DictConfig, cluster_type: tp.Optional[ClusterType] = None
) -> omegaconf.DictConfig:
"""Update SLURM parameters in configuration based on cluster type.
If the cluster type is not specify, it infers it automatically.
"""
from ..environment import AudioCraftEnvironment
cluster_type = get_cluster_type(cluster_type)
# apply cluster-specific adjustments
if cluster_type == ClusterType.AWS:
cfg["mem_per_gpu"] = None
cfg["constraint"] = None
cfg["setup"] = []
elif cluster_type == ClusterType.RSC:
cfg["mem_per_gpu"] = None
cfg["setup"] = []
cfg["constraint"] = None
cfg["partition"] = "learn"
slurm_exclude = AudioCraftEnvironment.get_slurm_exclude()
if slurm_exclude is not None:
cfg["exclude"] = slurm_exclude
return cfg
| audiocraft-main | audiocraft/utils/cluster.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
try:
import IPython.display as ipd # type: ignore
except ImportError:
# Note in a notebook...
pass
import torch
def display_audio(samples: torch.Tensor, sample_rate: int):
"""Renders an audio player for the given audio samples.
Args:
samples (torch.Tensor): a Tensor of decoded audio samples
with shapes [B, C, T] or [C, T]
sample_rate (int): sample rate audio should be displayed with.
"""
assert samples.dim() == 2 or samples.dim() == 3
samples = samples.detach().cpu()
if samples.dim() == 2:
samples = samples[None, ...]
for audio in samples:
ipd.display(ipd.Audio(audio, rate=sample_rate))
| audiocraft-main | audiocraft/utils/notebook.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import typing as tp
import dora
import torch
logger = logging.getLogger(__name__)
class Profiler:
"""Context manager wrapper for xformers profiler.
"""
def __init__(self, module: torch.nn.Module, enabled: bool = False):
self.profiler: tp.Optional[tp.Any] = None
if enabled:
from xformers.profiler import profile
output_dir = dora.get_xp().folder / 'profiler_data'
logger.info("Profiling activated, results with be saved to %s", output_dir)
self.profiler = profile(output_dir=output_dir, module=module)
def step(self):
if self.profiler is not None:
self.profiler.step() # type: ignore
def __enter__(self):
if self.profiler is not None:
return self.profiler.__enter__() # type: ignore
def __exit__(self, exc_type, exc_value, exc_tb):
if self.profiler is not None:
return self.profiler.__exit__(exc_type, exc_value, exc_tb) # type: ignore
| audiocraft-main | audiocraft/utils/profiler.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| audiocraft-main | audiocraft/utils/samples/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
API that can manage the storage and retrieval of generated samples produced by experiments.
It offers the following benefits:
* Samples are stored in a consistent way across epoch
* Metadata about the samples can be stored and retrieved
* Can retrieve audio
* Identifiers are reliable and deterministic for prompted and conditioned samples
* Can request the samples for multiple XPs, grouped by sample identifier
* For no-input samples (not prompt and no conditions), samples across XPs are matched
by sorting their identifiers
"""
from concurrent.futures import ThreadPoolExecutor
from dataclasses import asdict, dataclass
from functools import lru_cache
import hashlib
import json
import logging
from pathlib import Path
import re
import typing as tp
import unicodedata
import uuid
import dora
import torch
from ...data.audio import audio_read, audio_write
logger = logging.getLogger(__name__)
@dataclass
class ReferenceSample:
id: str
path: str
duration: float
@dataclass
class Sample:
id: str
path: str
epoch: int
duration: float
conditioning: tp.Optional[tp.Dict[str, tp.Any]]
prompt: tp.Optional[ReferenceSample]
reference: tp.Optional[ReferenceSample]
generation_args: tp.Optional[tp.Dict[str, tp.Any]]
def __hash__(self):
return hash(self.id)
def audio(self) -> tp.Tuple[torch.Tensor, int]:
return audio_read(self.path)
def audio_prompt(self) -> tp.Optional[tp.Tuple[torch.Tensor, int]]:
return audio_read(self.prompt.path) if self.prompt is not None else None
def audio_reference(self) -> tp.Optional[tp.Tuple[torch.Tensor, int]]:
return audio_read(self.reference.path) if self.reference is not None else None
class SampleManager:
"""Audio samples IO handling within a given dora xp.
The sample manager handles the dumping and loading logic for generated and
references samples across epochs for a given xp, providing a simple API to
store, retrieve and compare audio samples.
Args:
xp (dora.XP): Dora experiment object. The XP contains information on the XP folder
where all outputs are stored and the configuration of the experiment,
which is useful to retrieve audio-related parameters.
map_reference_to_sample_id (bool): Whether to use the sample_id for all reference samples
instead of generating a dedicated hash id. This is useful to allow easier comparison
with ground truth sample from the files directly without having to read the JSON metadata
to do the mapping (at the cost of potentially dumping duplicate prompts/references
depending on the task).
"""
def __init__(self, xp: dora.XP, map_reference_to_sample_id: bool = False):
self.xp = xp
self.base_folder: Path = xp.folder / xp.cfg.generate.path
self.reference_folder = self.base_folder / 'reference'
self.map_reference_to_sample_id = map_reference_to_sample_id
self.samples: tp.List[Sample] = []
self._load_samples()
@property
def latest_epoch(self):
"""Latest epoch across all samples."""
return max(self.samples, key=lambda x: x.epoch).epoch if self.samples else 0
def _load_samples(self):
"""Scan the sample folder and load existing samples."""
jsons = self.base_folder.glob('**/*.json')
with ThreadPoolExecutor(6) as pool:
self.samples = list(pool.map(self._load_sample, jsons))
@staticmethod
@lru_cache(2**26)
def _load_sample(json_file: Path) -> Sample:
with open(json_file, 'r') as f:
data: tp.Dict[str, tp.Any] = json.load(f)
# fetch prompt data
prompt_data = data.get('prompt')
prompt = ReferenceSample(id=prompt_data['id'], path=prompt_data['path'],
duration=prompt_data['duration']) if prompt_data else None
# fetch reference data
reference_data = data.get('reference')
reference = ReferenceSample(id=reference_data['id'], path=reference_data['path'],
duration=reference_data['duration']) if reference_data else None
# build sample object
return Sample(id=data['id'], path=data['path'], epoch=data['epoch'], duration=data['duration'],
prompt=prompt, conditioning=data.get('conditioning'), reference=reference,
generation_args=data.get('generation_args'))
def _init_hash(self):
return hashlib.sha1()
def _get_tensor_id(self, tensor: torch.Tensor) -> str:
hash_id = self._init_hash()
hash_id.update(tensor.numpy().data)
return hash_id.hexdigest()
def _get_sample_id(self, index: int, prompt_wav: tp.Optional[torch.Tensor],
conditions: tp.Optional[tp.Dict[str, str]]) -> str:
"""Computes an id for a sample given its input data.
This id is deterministic if prompt and/or conditions are provided by using a sha1 hash on the input.
Otherwise, a random id of the form "noinput_{uuid4().hex}" is returned.
Args:
index (int): Batch index, Helpful to differentiate samples from the same batch.
prompt_wav (torch.Tensor): Prompt used during generation.
conditions (dict[str, str]): Conditioning used during generation.
"""
# For totally unconditioned generations we will just use a random UUID.
# The function get_samples_for_xps will do a simple ordered match with a custom key.
if prompt_wav is None and not conditions:
return f"noinput_{uuid.uuid4().hex}"
# Human readable portion
hr_label = ""
# Create a deterministic id using hashing
hash_id = self._init_hash()
hash_id.update(f"{index}".encode())
if prompt_wav is not None:
hash_id.update(prompt_wav.numpy().data)
hr_label += "_prompted"
else:
hr_label += "_unprompted"
if conditions:
encoded_json = json.dumps(conditions, sort_keys=True).encode()
hash_id.update(encoded_json)
cond_str = "-".join([f"{key}={slugify(value)}"
for key, value in sorted(conditions.items())])
cond_str = cond_str[:100] # some raw text might be too long to be a valid filename
cond_str = cond_str if len(cond_str) > 0 else "unconditioned"
hr_label += f"_{cond_str}"
else:
hr_label += "_unconditioned"
return hash_id.hexdigest() + hr_label
def _store_audio(self, wav: torch.Tensor, stem_path: Path, overwrite: bool = False) -> Path:
"""Stores the audio with the given stem path using the XP's configuration.
Args:
wav (torch.Tensor): Audio to store.
stem_path (Path): Path in sample output directory with file stem to use.
overwrite (bool): When False (default), skips storing an existing audio file.
Returns:
Path: The path at which the audio is stored.
"""
existing_paths = [
path for path in stem_path.parent.glob(stem_path.stem + '.*')
if path.suffix != '.json'
]
exists = len(existing_paths) > 0
if exists and overwrite:
logger.warning(f"Overwriting existing audio file with stem path {stem_path}")
elif exists:
return existing_paths[0]
audio_path = audio_write(stem_path, wav, **self.xp.cfg.generate.audio)
return audio_path
def add_sample(self, sample_wav: torch.Tensor, epoch: int, index: int = 0,
conditions: tp.Optional[tp.Dict[str, str]] = None, prompt_wav: tp.Optional[torch.Tensor] = None,
ground_truth_wav: tp.Optional[torch.Tensor] = None,
generation_args: tp.Optional[tp.Dict[str, tp.Any]] = None) -> Sample:
"""Adds a single sample.
The sample is stored in the XP's sample output directory, under a corresponding epoch folder.
Each sample is assigned an id which is computed using the input data. In addition to the
sample itself, a json file containing associated metadata is stored next to it.
Args:
sample_wav (torch.Tensor): sample audio to store. Tensor of shape [channels, shape].
epoch (int): current training epoch.
index (int): helpful to differentiate samples from the same batch.
conditions (dict[str, str], optional): conditioning used during generation.
prompt_wav (torch.Tensor, optional): prompt used during generation. Tensor of shape [channels, shape].
ground_truth_wav (torch.Tensor, optional): reference audio where prompt was extracted from.
Tensor of shape [channels, shape].
generation_args (dict[str, any], optional): dictionary of other arguments used during generation.
Returns:
Sample: The saved sample.
"""
sample_id = self._get_sample_id(index, prompt_wav, conditions)
reuse_id = self.map_reference_to_sample_id
prompt, ground_truth = None, None
if prompt_wav is not None:
prompt_id = sample_id if reuse_id else self._get_tensor_id(prompt_wav.sum(0, keepdim=True))
prompt_duration = prompt_wav.shape[-1] / self.xp.cfg.sample_rate
prompt_path = self._store_audio(prompt_wav, self.base_folder / str(epoch) / 'prompt' / prompt_id)
prompt = ReferenceSample(prompt_id, str(prompt_path), prompt_duration)
if ground_truth_wav is not None:
ground_truth_id = sample_id if reuse_id else self._get_tensor_id(ground_truth_wav.sum(0, keepdim=True))
ground_truth_duration = ground_truth_wav.shape[-1] / self.xp.cfg.sample_rate
ground_truth_path = self._store_audio(ground_truth_wav, self.base_folder / 'reference' / ground_truth_id)
ground_truth = ReferenceSample(ground_truth_id, str(ground_truth_path), ground_truth_duration)
sample_path = self._store_audio(sample_wav, self.base_folder / str(epoch) / sample_id, overwrite=True)
duration = sample_wav.shape[-1] / self.xp.cfg.sample_rate
sample = Sample(sample_id, str(sample_path), epoch, duration, conditions, prompt, ground_truth, generation_args)
self.samples.append(sample)
with open(sample_path.with_suffix('.json'), 'w') as f:
json.dump(asdict(sample), f, indent=2)
return sample
def add_samples(self, samples_wavs: torch.Tensor, epoch: int,
conditioning: tp.Optional[tp.List[tp.Dict[str, tp.Any]]] = None,
prompt_wavs: tp.Optional[torch.Tensor] = None,
ground_truth_wavs: tp.Optional[torch.Tensor] = None,
generation_args: tp.Optional[tp.Dict[str, tp.Any]] = None) -> tp.List[Sample]:
"""Adds a batch of samples.
The samples are stored in the XP's sample output directory, under a corresponding
epoch folder. Each sample is assigned an id which is computed using the input data and their batch index.
In addition to the sample itself, a json file containing associated metadata is stored next to it.
Args:
sample_wavs (torch.Tensor): Batch of audio wavs to store. Tensor of shape [batch_size, channels, shape].
epoch (int): Current training epoch.
conditioning (list of dict[str, str], optional): List of conditions used during generation,
one per sample in the batch.
prompt_wavs (torch.Tensor, optional): Prompts used during generation. Tensor of shape
[batch_size, channels, shape].
ground_truth_wav (torch.Tensor, optional): Reference audio where prompts were extracted from.
Tensor of shape [batch_size, channels, shape].
generation_args (dict[str, Any], optional): Dictionary of other arguments used during generation.
Returns:
samples (list of Sample): The saved audio samples with prompts, ground truth and metadata.
"""
samples = []
for idx, wav in enumerate(samples_wavs):
prompt_wav = prompt_wavs[idx] if prompt_wavs is not None else None
gt_wav = ground_truth_wavs[idx] if ground_truth_wavs is not None else None
conditions = conditioning[idx] if conditioning is not None else None
samples.append(self.add_sample(wav, epoch, idx, conditions, prompt_wav, gt_wav, generation_args))
return samples
def get_samples(self, epoch: int = -1, max_epoch: int = -1, exclude_prompted: bool = False,
exclude_unprompted: bool = False, exclude_conditioned: bool = False,
exclude_unconditioned: bool = False) -> tp.Set[Sample]:
"""Returns a set of samples for this XP. Optionally, you can filter which samples to obtain.
Please note that existing samples are loaded during the manager's initialization, and added samples through this
manager are also tracked. Any other external changes are not tracked automatically, so creating a new manager
is the only way detect them.
Args:
epoch (int): If provided, only return samples corresponding to this epoch.
max_epoch (int): If provided, only return samples corresponding to the latest epoch that is <= max_epoch.
exclude_prompted (bool): If True, does not include samples that used a prompt.
exclude_unprompted (bool): If True, does not include samples that did not use a prompt.
exclude_conditioned (bool): If True, excludes samples that used conditioning.
exclude_unconditioned (bool): If True, excludes samples that did not use conditioning.
Returns:
Samples (set of Sample): The retrieved samples matching the provided filters.
"""
if max_epoch >= 0:
samples_epoch = max(sample.epoch for sample in self.samples if sample.epoch <= max_epoch)
else:
samples_epoch = self.latest_epoch if epoch < 0 else epoch
samples = {
sample
for sample in self.samples
if (
(sample.epoch == samples_epoch) and
(not exclude_prompted or sample.prompt is None) and
(not exclude_unprompted or sample.prompt is not None) and
(not exclude_conditioned or not sample.conditioning) and
(not exclude_unconditioned or sample.conditioning)
)
}
return samples
def slugify(value: tp.Any, allow_unicode: bool = False):
"""Process string for safer file naming.
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
value = re.sub(r"[^\w\s-]", "", value.lower())
return re.sub(r"[-\s]+", "-", value).strip("-_")
def _match_stable_samples(samples_per_xp: tp.List[tp.Set[Sample]]) -> tp.Dict[str, tp.List[Sample]]:
# Create a dictionary of stable id -> sample per XP
stable_samples_per_xp = [{
sample.id: sample for sample in samples
if sample.prompt is not None or sample.conditioning
} for samples in samples_per_xp]
# Set of all stable ids
stable_ids = {id for samples in stable_samples_per_xp for id in samples.keys()}
# Dictionary of stable id -> list of samples. If an XP does not have it, assign None
stable_samples = {id: [xp.get(id) for xp in stable_samples_per_xp] for id in stable_ids}
# Filter out ids that contain None values (we only want matched samples after all)
# cast is necessary to avoid mypy linter errors.
return {id: tp.cast(tp.List[Sample], samples) for id, samples in stable_samples.items() if None not in samples}
def _match_unstable_samples(samples_per_xp: tp.List[tp.Set[Sample]]) -> tp.Dict[str, tp.List[Sample]]:
# For unstable ids, we use a sorted list since we'll match them in order
unstable_samples_per_xp = [[
sample for sample in sorted(samples, key=lambda x: x.id)
if sample.prompt is None and not sample.conditioning
] for samples in samples_per_xp]
# Trim samples per xp so all samples can have a match
min_len = min([len(samples) for samples in unstable_samples_per_xp])
unstable_samples_per_xp = [samples[:min_len] for samples in unstable_samples_per_xp]
# Dictionary of index -> list of matched samples
return {
f'noinput_{i}': [samples[i] for samples in unstable_samples_per_xp] for i in range(min_len)
}
def get_samples_for_xps(xps: tp.List[dora.XP], **kwargs) -> tp.Dict[str, tp.List[Sample]]:
"""Gets a dictionary of matched samples across the given XPs.
Each dictionary entry maps a sample id to a list of samples for that id. The number of samples per id
will always match the number of XPs provided and will correspond to each XP in the same order given.
In other words, only samples that can be match across all provided XPs will be returned
in order to satisfy this rule.
There are two types of ids that can be returned: stable and unstable.
* Stable IDs are deterministic ids that were computed by the SampleManager given a sample's inputs
(prompts/conditioning). This is why we can match them across XPs.
* Unstable IDs are of the form "noinput_{idx}" and are generated on-the-fly, in order to map samples
that used non-deterministic, random ids. This is the case for samples that did not use prompts or
conditioning for their generation. This function will sort these samples by their id and match them
by their index.
Args:
xps: a list of XPs to match samples from.
start_epoch (int): If provided, only return samples corresponding to this epoch or newer.
end_epoch (int): If provided, only return samples corresponding to this epoch or older.
exclude_prompted (bool): If True, does not include samples that used a prompt.
exclude_unprompted (bool): If True, does not include samples that did not use a prompt.
exclude_conditioned (bool): If True, excludes samples that used conditioning.
exclude_unconditioned (bool): If True, excludes samples that did not use conditioning.
"""
managers = [SampleManager(xp) for xp in xps]
samples_per_xp = [manager.get_samples(**kwargs) for manager in managers]
stable_samples = _match_stable_samples(samples_per_xp)
unstable_samples = _match_unstable_samples(samples_per_xp)
return dict(stable_samples, **unstable_samples)
| audiocraft-main | audiocraft/utils/samples/manager.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Dora Grids."""
| audiocraft-main | audiocraft/grids/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
import time
import typing as tp
from dora import Explorer
import treetable as tt
def get_sheep_ping(sheep) -> tp.Optional[str]:
"""Return the amount of time since the Sheep made some update
to its log. Returns a str using the relevant time unit."""
ping = None
if sheep.log is not None and sheep.log.exists():
delta = time.time() - sheep.log.stat().st_mtime
if delta > 3600 * 24:
ping = f'{delta / (3600 * 24):.1f}d'
elif delta > 3600:
ping = f'{delta / (3600):.1f}h'
elif delta > 60:
ping = f'{delta / 60:.1f}m'
else:
ping = f'{delta:.1f}s'
return ping
class BaseExplorer(ABC, Explorer):
"""Base explorer for AudioCraft grids.
All task specific solvers are expected to implement the `get_grid_metrics`
method to specify logic about metrics to display for a given task.
If additional stages are used, the child explorer must define how to handle
these new stages in the `process_history` and `process_sheep` methods.
"""
def stages(self):
return ["train", "valid", "evaluate"]
def get_grid_meta(self):
"""Returns the list of Meta information to display for each XP/job.
"""
return [
tt.leaf("index", align=">"),
tt.leaf("name", wrap=140),
tt.leaf("state"),
tt.leaf("sig", align=">"),
tt.leaf("sid", align="<"),
]
@abstractmethod
def get_grid_metrics(self):
"""Return the metrics that should be displayed in the tracking table.
"""
...
def process_sheep(self, sheep, history):
train = {
"epoch": len(history),
}
parts = {"train": train}
for metrics in history:
for key, sub in metrics.items():
part = parts.get(key, {})
if 'duration' in sub:
# Convert to minutes for readability.
sub['duration'] = sub['duration'] / 60.
part.update(sub)
parts[key] = part
ping = get_sheep_ping(sheep)
if ping is not None:
for name in self.stages():
if name not in parts:
parts[name] = {}
# Add the ping to each part for convenience.
parts[name]['ping'] = ping
return parts
| audiocraft-main | audiocraft/grids/_base_explorers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import treetable as tt
from .._base_explorers import BaseExplorer
class DiffusionExplorer(BaseExplorer):
eval_metrics = ["sisnr", "visqol"]
def stages(self):
return ["train", "valid", "valid_ema", "evaluate", "evaluate_ema"]
def get_grid_meta(self):
"""Returns the list of Meta information to display for each XP/job.
"""
return [
tt.leaf("index", align=">"),
tt.leaf("name", wrap=140),
tt.leaf("state"),
tt.leaf("sig", align=">"),
]
def get_grid_metrics(self):
"""Return the metrics that should be displayed in the tracking table.
"""
return [
tt.group(
"train",
[
tt.leaf("epoch"),
tt.leaf("loss", ".3%"),
],
align=">",
),
tt.group(
"valid",
[
tt.leaf("loss", ".3%"),
# tt.leaf("loss_0", ".3%"),
],
align=">",
),
tt.group(
"valid_ema",
[
tt.leaf("loss", ".3%"),
# tt.leaf("loss_0", ".3%"),
],
align=">",
),
tt.group(
"evaluate", [tt.leaf("rvm", ".4f"), tt.leaf("rvm_0", ".4f"),
tt.leaf("rvm_1", ".4f"), tt.leaf("rvm_2", ".4f"),
tt.leaf("rvm_3", ".4f"), ], align=">"
),
tt.group(
"evaluate_ema", [tt.leaf("rvm", ".4f"), tt.leaf("rvm_0", ".4f"),
tt.leaf("rvm_1", ".4f"), tt.leaf("rvm_2", ".4f"),
tt.leaf("rvm_3", ".4f")], align=">"
),
]
| audiocraft-main | audiocraft/grids/diffusion/_explorers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Diffusion grids."""
| audiocraft-main | audiocraft/grids/diffusion/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training of the 4 diffusion models described in
"From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion"
(paper link).
"""
from ._explorers import DiffusionExplorer
@DiffusionExplorer
def explorer(launcher):
launcher.slurm_(gpus=4, partition='learnfair')
launcher.bind_({'solver': 'diffusion/default',
'dset': 'internal/music_10k_32khz'})
with launcher.job_array():
launcher({'filter.use': True, 'filter.idx_band': 0, "processor.use": False, 'processor.power_std': 0.4})
launcher({'filter.use': True, 'filter.idx_band': 1, "processor.use": False, 'processor.power_std': 0.4})
launcher({'filter.use': True, 'filter.idx_band': 2, "processor.use": True, 'processor.power_std': 0.4})
launcher({'filter.use': True, 'filter.idx_band': 3, "processor.use": True, 'processor.power_std': 0.75})
| audiocraft-main | audiocraft/grids/diffusion/4_bands_base_32khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ._explorers import LMExplorer
from ...environment import AudioCraftEnvironment
@LMExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=32, partition=partitions)
launcher.bind_(solver='musicgen/musicgen_melody_32khz')
# replace this by the desired music dataset
launcher.bind_(dset='internal/music_400k_32khz')
fsdp = {'autocast': False, 'fsdp.use': True}
medium = {'model/lm/model_scale': 'medium'}
large = {'model/lm/model_scale': 'large'}
cfg_low = {'classifier_free_guidance.training_dropout': 0.2}
wd_low = {'conditioners.description.t5.word_dropout': 0.2}
adam = {'optim.optimizer': 'adamw', 'optim.lr': 1e-4}
cache_path = {'conditioners.self_wav.chroma_stem.cache_path':
'/fsx-audio-craft-llm/jadecopet/experiments/audiocraft/caches/chroma_stem'}
# CACHE GENERATION JOBS
n_cache_gen_jobs = 4
gen_sub = launcher.slurm(gpus=1)
gen_sub.bind_(
cache_path, {
# the cache is always computed over the whole file, so duration doesn't matter here.
'dataset.segment_duration': 2.,
'dataset.batch_size': 8,
'dataset.train.permutation_on_files': True, # try to not repeat files.
'optim.epochs': 10,
'model/lm/model_scale': 'xsmall',
})
with gen_sub.job_array():
for gen_job in range(n_cache_gen_jobs):
gen_sub({'dataset.train.shuffle_seed': gen_job})
# ACTUAL TRAINING JOBS.
launcher.bind_(fsdp)
launcher.slurm_(gpus=32).bind_(label='32gpus')
with launcher.job_array():
sub = launcher.bind()
sub()
sub(cache_path)
launcher.slurm_(gpus=64).bind_(label='64gpus')
with launcher.job_array():
sub = launcher.bind()
sub(medium, adam)
launcher.slurm_(gpus=96).bind_(label='96gpus')
with launcher.job_array():
sub = launcher.bind()
sub(large, cfg_low, wd_low, adam, {'optim.max_norm': 3})
| audiocraft-main | audiocraft/grids/musicgen/musicgen_melody_32khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import treetable as tt
from .._base_explorers import BaseExplorer
class LMExplorer(BaseExplorer):
eval_metrics: tp.List[str] = []
def stages(self) -> tp.List[str]:
return ['train', 'valid']
def get_grid_metrics(self):
"""Return the metrics that should be displayed in the tracking table."""
return [
tt.group(
'train',
[
tt.leaf('epoch'),
tt.leaf('duration', '.1f'), # duration in minutes
tt.leaf('ping'),
tt.leaf('ce', '.4f'), # cross entropy
tt.leaf("ppl", '.3f'), # perplexity
],
align='>',
),
tt.group(
'valid',
[
tt.leaf('ce', '.4f'),
tt.leaf('ppl', '.3f'),
tt.leaf('best_ppl', '.3f'),
],
align='>',
),
]
def process_sheep(self, sheep, history):
parts = super().process_sheep(sheep, history)
track_by = {'ppl': 'lower'} # values should be in ['lower', 'higher']
best_metrics = {k: (1 if v == 'lower' else -1) * float('inf') for k, v in track_by.items()}
def comparator(mode, a, b):
return a < b if mode == 'lower' else a > b
for metrics in history:
for key, sub in metrics.items():
for metric in track_by:
# for the validation set, keep track of best metrics (ppl in this example)
# this is so we can conveniently compare metrics between runs in the grid
if key == 'valid' and metric in sub and comparator(
track_by[metric], sub[metric], best_metrics[metric]
):
best_metrics[metric] = sub[metric]
if 'valid' in parts:
parts['valid'].update({f'best_{k}': v for k, v in best_metrics.items()})
return parts
class GenerationEvalExplorer(BaseExplorer):
eval_metrics: tp.List[str] = []
def stages(self) -> tp.List[str]:
return ['evaluate']
def get_grid_metrics(self):
"""Return the metrics that should be displayed in the tracking table."""
return [
tt.group(
'evaluate',
[
tt.leaf('epoch', '.3f'),
tt.leaf('duration', '.1f'),
tt.leaf('ping'),
tt.leaf('ce', '.4f'),
tt.leaf('ppl', '.3f'),
tt.leaf('fad', '.3f'),
tt.leaf('kld', '.3f'),
tt.leaf('text_consistency', '.3f'),
tt.leaf('chroma_cosine', '.3f'),
],
align='>',
),
]
| audiocraft-main | audiocraft/grids/musicgen/_explorers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""MusicGen grids."""
| audiocraft-main | audiocraft/grids/musicgen/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ._explorers import LMExplorer
from ...environment import AudioCraftEnvironment
@LMExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=32, partition=partitions)
launcher.bind_(solver='musicgen/musicgen_base_32khz')
# replace this by the desired music dataset
launcher.bind_(dset='internal/music_400k_32khz')
launcher.bind_(conditioner='clapemb2music')
fsdp = {'autocast': False, 'fsdp.use': True}
cache_path = {'conditioners.description.clap.cache_path':
'/fsx-audio-craft-llm/jadecopet/experiments/audiocraft/caches/clap_embed_music'}
text_wav_training_opt = {'conditioners.description.clap.text_p': 0.5}
launcher.bind_(fsdp)
launcher.slurm_(gpus=32).bind_(label='32gpus')
with launcher.job_array():
launcher()
launcher(text_wav_training_opt)
launcher(cache_path)
launcher(cache_path, text_wav_training_opt)
| audiocraft-main | audiocraft/grids/musicgen/musicgen_clapemb_32khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluation with objective metrics for the pretrained MusicGen models.
This grid takes signature from the training grid and runs evaluation-only stage.
When running the grid for the first time, please use:
REGEN=1 dora grid musicgen.musicgen_pretrained_32khz_eval
and re-use the REGEN=1 option when the grid is changed to force regenerating it.
Note that you need the proper metrics external libraries setup to use all
the objective metrics activated in this grid. Refer to the README for more information.
"""
import os
from ._explorers import GenerationEvalExplorer
from ...environment import AudioCraftEnvironment
from ... import train
def eval(launcher, batch_size: int = 32, eval_melody: bool = False):
opts = {
'dset': 'audio/musiccaps_32khz',
'solver/musicgen/evaluation': 'objective_eval',
'execute_only': 'evaluate',
'+dataset.evaluate.batch_size': batch_size,
'+metrics.fad.tf.batch_size': 16,
}
# chroma-specific evaluation
chroma_opts = {
'dset': 'internal/music_400k_32khz',
'dataset.evaluate.segment_duration': 30,
'dataset.evaluate.num_samples': 1000,
'evaluate.metrics.chroma_cosine': True,
'evaluate.metrics.fad': False,
'evaluate.metrics.kld': False,
'evaluate.metrics.text_consistency': False,
}
# binary for FAD computation: replace this path with your own path
metrics_opts = {
'metrics.fad.tf.bin': '/data/home/jadecopet/local/usr/opt/google-research'
}
opt1 = {'generate.lm.use_sampling': True, 'generate.lm.top_k': 250, 'generate.lm.top_p': 0.}
opt2 = {'transformer_lm.two_step_cfg': True}
sub = launcher.bind(opts)
sub.bind_(metrics_opts)
# base objective metrics
sub(opt1, opt2)
if eval_melody:
# chroma-specific metrics
sub(opt1, opt2, chroma_opts)
@GenerationEvalExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=4, partition=partitions)
if 'REGEN' not in os.environ:
folder = train.main.dora.dir / 'grids' / __name__.split('.', 2)[-1]
with launcher.job_array():
for sig in folder.iterdir():
if not sig.is_symlink():
continue
xp = train.main.get_xp_from_sig(sig.name)
launcher(xp.argv)
return
with launcher.job_array():
musicgen_base = launcher.bind(solver="musicgen/musicgen_base_32khz")
musicgen_base.bind_({'autocast': False, 'fsdp.use': True})
# base musicgen models
musicgen_base_small = musicgen_base.bind({'continue_from': '//pretrained/facebook/musicgen-small'})
eval(musicgen_base_small, batch_size=128)
musicgen_base_medium = musicgen_base.bind({'continue_from': '//pretrained/facebook/musicgen-medium'})
musicgen_base_medium.bind_({'model/lm/model_scale': 'medium'})
eval(musicgen_base_medium, batch_size=128)
musicgen_base_large = musicgen_base.bind({'continue_from': '//pretrained/facebook/musicgen-large'})
musicgen_base_large.bind_({'model/lm/model_scale': 'large'})
eval(musicgen_base_large, batch_size=128)
# melody musicgen model
musicgen_melody = launcher.bind(solver="musicgen/musicgen_melody_32khz")
musicgen_melody.bind_({'autocast': False, 'fsdp.use': True})
musicgen_melody_medium = musicgen_melody.bind({'continue_from': '//pretrained/facebook/musicgen-melody'})
musicgen_melody_medium.bind_({'model/lm/model_scale': 'medium'})
eval(musicgen_melody_medium, batch_size=128, eval_melody=True)
| audiocraft-main | audiocraft/grids/musicgen/musicgen_pretrained_32khz_eval.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ._explorers import LMExplorer
from ...environment import AudioCraftEnvironment
@LMExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=32, partition=partitions)
launcher.bind_(solver='musicgen/musicgen_base_32khz')
# replace this by the desired music dataset
launcher.bind_(dset='internal/music_400k_32khz')
fsdp = {'autocast': False, 'fsdp.use': True}
medium = {'model/lm/model_scale': 'medium'}
large = {'model/lm/model_scale': 'large'}
cfg_low = {'classifier_free_guidance.training_dropout': 0.2}
wd_low = {'conditioners.description.t5.word_dropout': 0.2}
adam = {'optim.optimizer': 'adamw', 'optim.lr': 1e-4}
launcher.bind_(fsdp)
launcher.slurm_(gpus=32).bind_(label='32gpus')
with launcher.job_array():
sub = launcher.bind()
sub()
launcher.slurm_(gpus=64).bind_(label='64gpus')
with launcher.job_array():
sub = launcher.bind()
sub(medium, adam)
launcher.slurm_(gpus=96).bind_(label='96gpus')
with launcher.job_array():
sub = launcher.bind()
sub(large, cfg_low, wd_low, adam, {'optim.max_norm': 3})
| audiocraft-main | audiocraft/grids/musicgen/musicgen_base_32khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ._explorers import LMExplorer
from ...environment import AudioCraftEnvironment
@LMExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=32, partition=partitions)
launcher.bind_(solver='musicgen/musicgen_base_32khz')
# replace this by the desired music dataset
launcher.bind_(dset='internal/music_400k_32khz')
fsdp = {'autocast': False, 'fsdp.use': True}
medium = {'model/lm/model_scale': 'medium'}
large = {'model/lm/model_scale': 'large'}
cfg_low = {'classifier_free_guidance.training_dropout': 0.2}
wd_low = {'conditioners.description.t5.word_dropout': 0.2}
adam = {'optim.optimizer': 'adamw', 'optim.lr': 1e-4}
# BEGINNING OF CACHE WRITING JOBS.
cache_write = {
'cache.path': '/fsx-codegen/defossez/cache/interleave_stereo_nv_32k',
'cache.write': True,
'generate.every': 500,
'evaluate.every': 500,
'logging.log_updates': 50,
}
cache_sub = launcher.bind({'model/lm/model_scale': 'xsmall', 'conditioner': 'none'})
cache_sub.bind_({'deadlock.use': True})
cache_sub.slurm_(gpus=8)
with launcher.job_array():
num_shards = 10 # total number of jobs running in parallel.
for shard in range(0, num_shards):
launcher(cache_write, {'cache.write_num_shards': num_shards, 'cache.write_shard': shard})
# REMOVE THE FOLLOWING RETURN STATEMENT ONCE THE ABOVE JOBS ARE DONE,
# OR SUFFICIENTLY AHEAD.
return
cache = {
'cache.path': '/fsx-codegen/defossez/cache/interleave_stereo_nv_32k',
}
launcher.bind_(fsdp, cache)
launcher.slurm_(gpus=32).bind_(label='32gpus')
with launcher.job_array():
sub = launcher.bind()
sub()
launcher.slurm_(gpus=64).bind_(label='64gpus')
with launcher.job_array():
sub = launcher.bind()
sub(medium, adam)
launcher.slurm_(gpus=96).bind_(label='96gpus')
with launcher.job_array():
sub = launcher.bind()
sub(large, cfg_low, wd_low, adam, {'optim.max_norm': 3})
| audiocraft-main | audiocraft/grids/musicgen/musicgen_base_cached_32khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import treetable as tt
from .._base_explorers import BaseExplorer
class CompressionExplorer(BaseExplorer):
eval_metrics = ["sisnr", "visqol"]
def stages(self):
return ["train", "valid", "evaluate"]
def get_grid_meta(self):
"""Returns the list of Meta information to display for each XP/job.
"""
return [
tt.leaf("index", align=">"),
tt.leaf("name", wrap=140),
tt.leaf("state"),
tt.leaf("sig", align=">"),
]
def get_grid_metrics(self):
"""Return the metrics that should be displayed in the tracking table.
"""
return [
tt.group(
"train",
[
tt.leaf("epoch"),
tt.leaf("bandwidth", ".2f"),
tt.leaf("adv", ".4f"),
tt.leaf("d_loss", ".4f"),
],
align=">",
),
tt.group(
"valid",
[
tt.leaf("bandwidth", ".2f"),
tt.leaf("adv", ".4f"),
tt.leaf("msspec", ".4f"),
tt.leaf("sisnr", ".2f"),
],
align=">",
),
tt.group(
"evaluate", [tt.leaf(name, ".3f") for name in self.eval_metrics], align=">"
),
]
| audiocraft-main | audiocraft/grids/compression/_explorers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Grid search file, simply list all the exp you want in `explorer`.
Any new exp added there will be scheduled.
You can cancel and experiment by commenting its line.
This grid shows how to train the new AudioGen EnCodec model at 16 kHz.
"""
from ._explorers import CompressionExplorer
from ...environment import AudioCraftEnvironment
@CompressionExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=8, partition=partitions)
# use configuration for AudioGen's EnCodec model trained on monophonic audio sampled at 16 kHz
# AudioGen's EnCodec is trained with a total stride of 320 leading to a frame rate of 50 hz
launcher.bind_(solver='compression/encodec_audiogen_16khz')
# replace this by the desired sound dataset
launcher.bind_(dset='internal/sounds_16khz')
# launch xp
launcher()
| audiocraft-main | audiocraft/grids/compression/encodec_audiogen_16khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""EnCodec grids."""
| audiocraft-main | audiocraft/grids/compression/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Grid search file, simply list all the exp you want in `explorer`.
Any new exp added there will be scheduled.
You can cancel and experiment by commenting its line.
This grid shows how to train a MusicGen EnCodec model at 32 kHz.
"""
from ._explorers import CompressionExplorer
from ...environment import AudioCraftEnvironment
@CompressionExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=8, partition=partitions)
# use configuration for MusicGen's EnCodec model trained on monophonic audio sampled at 32 kHz
# MusicGen's EnCodec is trained with a total stride of 640 leading to a frame rate of 50 hz
launcher.bind_(solver='compression/encodec_musicgen_32khz')
# replace this by the desired music dataset
launcher.bind_(dset='internal/music_400k_32khz')
# launch xp
launcher()
launcher({
'metrics.visqol.bin': '/data/home/jadecopet/local/usr/opt/visqol',
'label': 'visqol',
'evaluate.metrics.visqol': True
})
| audiocraft-main | audiocraft/grids/compression/encodec_musicgen_32khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Grid search file, simply list all the exp you want in `explorer`.
Any new exp added there will be scheduled.
You can cancel and experiment by commenting its line.
This grid shows how to train a base causal EnCodec model at 24 kHz.
"""
from ._explorers import CompressionExplorer
from ...environment import AudioCraftEnvironment
@CompressionExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=8, partition=partitions)
# base causal EnCodec trained on monophonic audio sampled at 24 kHz
launcher.bind_(solver='compression/encodec_base_24khz')
# replace this by the desired dataset
launcher.bind_(dset='audio/example')
# launch xp
launcher()
| audiocraft-main | audiocraft/grids/compression/encodec_base_24khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Grid search file, simply list all the exp you want in `explorer`.
Any new exp added there will be scheduled.
You can cancel and experiment by commenting its line.
This grid is a minimal example for debugging compression task
and how to override parameters directly in a grid.
Learn more about dora grids: https://github.com/facebookresearch/dora
"""
from ._explorers import CompressionExplorer
from ...environment import AudioCraftEnvironment
@CompressionExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=2, partition=partitions)
launcher.bind_(solver='compression/debug')
with launcher.job_array():
# base debug task using config from solver=compression/debug
launcher()
# we can override parameters in the grid to launch additional xps
launcher({'rvq.bins': 2048, 'rvq.n_q': 4})
| audiocraft-main | audiocraft/grids/compression/debug.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""AudioGen grids."""
| audiocraft-main | audiocraft/grids/audiogen/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ..musicgen._explorers import LMExplorer
from ...environment import AudioCraftEnvironment
@LMExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=64, partition=partitions)
launcher.bind_(solver='audiogen/audiogen_base_16khz')
# replace this by the desired environmental sound dataset
launcher.bind_(dset='internal/sounds_16khz')
fsdp = {'autocast': False, 'fsdp.use': True}
medium = {'model/lm/model_scale': 'medium'}
launcher.bind_(fsdp)
launcher(medium)
| audiocraft-main | audiocraft/grids/audiogen/audiogen_base_16khz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluation with objective metrics for the pretrained AudioGen models.
This grid takes signature from the training grid and runs evaluation-only stage.
When running the grid for the first time, please use:
REGEN=1 dora grid audiogen.audiogen_pretrained_16khz_eval
and re-use the REGEN=1 option when the grid is changed to force regenerating it.
Note that you need the proper metrics external libraries setup to use all
the objective metrics activated in this grid. Refer to the README for more information.
"""
import os
from ..musicgen._explorers import GenerationEvalExplorer
from ...environment import AudioCraftEnvironment
from ... import train
def eval(launcher, batch_size: int = 32):
opts = {
'dset': 'audio/audiocaps_16khz',
'solver/audiogen/evaluation': 'objective_eval',
'execute_only': 'evaluate',
'+dataset.evaluate.batch_size': batch_size,
'+metrics.fad.tf.batch_size': 32,
}
# binary for FAD computation: replace this path with your own path
metrics_opts = {
'metrics.fad.tf.bin': '/data/home/jadecopet/local/usr/opt/google-research'
}
opt1 = {'generate.lm.use_sampling': True, 'generate.lm.top_k': 250, 'generate.lm.top_p': 0.}
opt2 = {'transformer_lm.two_step_cfg': True}
sub = launcher.bind(opts)
sub.bind_(metrics_opts)
# base objective metrics
sub(opt1, opt2)
@GenerationEvalExplorer
def explorer(launcher):
partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global'])
launcher.slurm_(gpus=4, partition=partitions)
if 'REGEN' not in os.environ:
folder = train.main.dora.dir / 'grids' / __name__.split('.', 2)[-1]
with launcher.job_array():
for sig in folder.iterdir():
if not sig.is_symlink():
continue
xp = train.main.get_xp_from_sig(sig.name)
launcher(xp.argv)
return
audiogen_base = launcher.bind(solver="audiogen/audiogen_base_16khz")
audiogen_base.bind_({'autocast': False, 'fsdp.use': True})
audiogen_base_medium = audiogen_base.bind({'continue_from': '//pretrained/facebook/audiogen-medium'})
audiogen_base_medium.bind_({'model/lm/model_scale': 'medium'})
eval(audiogen_base_medium, batch_size=128)
| audiocraft-main | audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
All the functions to build the relevant models and modules
from the Hydra config.
"""
import typing as tp
import audiocraft
import omegaconf
import torch
from .encodec import CompressionModel, EncodecModel
from .lm import LMModel
from ..modules.codebooks_patterns import (
CodebooksPatternProvider,
DelayedPatternProvider,
MusicLMPattern,
ParallelPatternProvider,
UnrolledPatternProvider,
VALLEPattern,
)
from ..modules.conditioners import (
BaseConditioner,
ChromaStemConditioner,
CLAPEmbeddingConditioner,
ConditionFuser,
ConditioningProvider,
LUTConditioner,
T5Conditioner,
)
from .unet import DiffusionUnet
from .. import quantization as qt
from ..utils.utils import dict_from_config
from ..modules.diffusion_schedule import MultiBandProcessor, SampleProcessor
def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer:
klass = {
'no_quant': qt.DummyQuantizer,
'rvq': qt.ResidualVectorQuantizer
}[quantizer]
kwargs = dict_from_config(getattr(cfg, quantizer))
if quantizer != 'no_quant':
kwargs['dimension'] = dimension
return klass(**kwargs)
def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig):
if encoder_name == 'seanet':
kwargs = dict_from_config(getattr(cfg, 'seanet'))
encoder_override_kwargs = kwargs.pop('encoder')
decoder_override_kwargs = kwargs.pop('decoder')
encoder_kwargs = {**kwargs, **encoder_override_kwargs}
decoder_kwargs = {**kwargs, **decoder_override_kwargs}
encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs)
decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs)
return encoder, decoder
else:
raise KeyError(f"Unexpected compression model {cfg.compression_model}")
def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel:
"""Instantiate a compression model."""
if cfg.compression_model == 'encodec':
kwargs = dict_from_config(getattr(cfg, 'encodec'))
encoder_name = kwargs.pop('autoencoder')
quantizer_name = kwargs.pop('quantizer')
encoder, decoder = get_encodec_autoencoder(encoder_name, cfg)
quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension)
frame_rate = kwargs['sample_rate'] // encoder.hop_length
renormalize = kwargs.pop('renormalize', False)
# deprecated params
kwargs.pop('renorm', None)
return EncodecModel(encoder, decoder, quantizer,
frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device)
else:
raise KeyError(f"Unexpected compression model {cfg.compression_model}")
def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:
"""Instantiate a transformer LM."""
if cfg.lm_model == 'transformer_lm':
kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))
n_q = kwargs['n_q']
q_modeling = kwargs.pop('q_modeling', None)
codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')
attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))
cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))
cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']
fuser = get_condition_fuser(cfg)
condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device)
if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically
kwargs['cross_attention'] = True
if codebooks_pattern_cfg.modeling is None:
assert q_modeling is not None, \
"LM model should either have a codebook pattern defined or transformer_lm.q_modeling"
codebooks_pattern_cfg = omegaconf.OmegaConf.create(
{'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}
)
pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)
return LMModel(
pattern_provider=pattern_provider,
condition_provider=condition_provider,
fuser=fuser,
cfg_dropout=cfg_prob,
cfg_coef=cfg_coef,
attribute_dropout=attribute_dropout,
dtype=getattr(torch, cfg.dtype),
device=cfg.device,
**kwargs
).to(cfg.device)
else:
raise KeyError(f"Unexpected LM model {cfg.lm_model}")
def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider:
"""Instantiate a conditioning model."""
device = cfg.device
duration = cfg.dataset.segment_duration
cfg = getattr(cfg, 'conditioners')
dict_cfg = {} if cfg is None else dict_from_config(cfg)
conditioners: tp.Dict[str, BaseConditioner] = {}
condition_provider_args = dict_cfg.pop('args', {})
condition_provider_args.pop('merge_text_conditions_p', None)
condition_provider_args.pop('drop_desc_p', None)
for cond, cond_cfg in dict_cfg.items():
model_type = cond_cfg['model']
model_args = cond_cfg[model_type]
if model_type == 't5':
conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args)
elif model_type == 'lut':
conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args)
elif model_type == 'chroma_stem':
conditioners[str(cond)] = ChromaStemConditioner(
output_dim=output_dim,
duration=duration,
device=device,
**model_args
)
elif model_type == 'clap':
conditioners[str(cond)] = CLAPEmbeddingConditioner(
output_dim=output_dim,
device=device,
**model_args
)
else:
raise ValueError(f"Unrecognized conditioning model: {model_type}")
conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args)
return conditioner
def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser:
"""Instantiate a condition fuser object."""
fuser_cfg = getattr(cfg, 'fuser')
fuser_methods = ['sum', 'cross', 'prepend', 'input_interpolate']
fuse2cond = {k: fuser_cfg[k] for k in fuser_methods}
kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods}
fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs)
return fuser
def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider:
"""Instantiate a codebooks pattern provider object."""
pattern_providers = {
'parallel': ParallelPatternProvider,
'delay': DelayedPatternProvider,
'unroll': UnrolledPatternProvider,
'valle': VALLEPattern,
'musiclm': MusicLMPattern,
}
name = cfg.modeling
kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {}
klass = pattern_providers[name]
return klass(n_q, **kwargs)
def get_debug_compression_model(device='cpu', sample_rate: int = 32000):
"""Instantiate a debug compression model to be used for unit tests."""
assert sample_rate in [16000, 32000], "unsupported sample rate for debug compression model"
model_ratios = {
16000: [10, 8, 8], # 25 Hz at 16kHz
32000: [10, 8, 16] # 25 Hz at 32kHz
}
ratios: tp.List[int] = model_ratios[sample_rate]
frame_rate = 25
seanet_kwargs: dict = {
'n_filters': 4,
'n_residual_layers': 1,
'dimension': 32,
'ratios': ratios,
}
print(seanet_kwargs)
encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs)
decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs)
quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4)
init_x = torch.randn(8, 32, 128)
quantizer(init_x, 1) # initialize kmeans etc.
compression_model = EncodecModel(
encoder, decoder, quantizer,
frame_rate=frame_rate, sample_rate=sample_rate, channels=1).to(device)
return compression_model.eval()
def get_diffusion_model(cfg: omegaconf.DictConfig):
# TODO Find a way to infer the channels from dset
channels = cfg.channels
num_steps = cfg.schedule.num_steps
return DiffusionUnet(
chin=channels, num_steps=num_steps, **cfg.diffusion_unet)
def get_processor(cfg, sample_rate: int = 24000):
sample_processor = SampleProcessor()
if cfg.use:
kw = dict(cfg)
kw.pop('use')
kw.pop('name')
if cfg.name == "multi_band_processor":
sample_processor = MultiBandProcessor(sample_rate=sample_rate, **kw)
return sample_processor
def get_debug_lm_model(device='cpu'):
"""Instantiate a debug LM to be used for unit tests."""
pattern = DelayedPatternProvider(n_q=4)
dim = 16
providers = {
'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"),
}
condition_provider = ConditioningProvider(providers)
fuser = ConditionFuser(
{'cross': ['description'], 'prepend': [],
'sum': [], 'input_interpolate': []})
lm = LMModel(
pattern, condition_provider, fuser,
n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2,
cross_attention=True, causal=True)
return lm.to(device).eval()
def get_wrapped_compression_model(
compression_model: CompressionModel,
cfg: omegaconf.DictConfig) -> CompressionModel:
# more to come.
return compression_model
| audiocraft-main | audiocraft/models/builders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Main model for using MusicGen. This will combine all the required components
and provide easy access to the generation API.
"""
import typing as tp
import warnings
import torch
from .encodec import CompressionModel
from .lm import LMModel
from .builders import get_debug_compression_model, get_debug_lm_model
from .loaders import load_compression_model, load_lm_model
from ..data.audio_utils import convert_audio
from ..modules.conditioners import ConditioningAttributes, WavCondition
from ..utils.autocast import TorchAutocast
MelodyList = tp.List[tp.Optional[torch.Tensor]]
MelodyType = tp.Union[torch.Tensor, MelodyList]
# backward compatible names mapping
_HF_MODEL_CHECKPOINTS_MAP = {
"small": "facebook/musicgen-small",
"medium": "facebook/musicgen-medium",
"large": "facebook/musicgen-large",
"melody": "facebook/musicgen-melody",
}
class MusicGen:
"""MusicGen main model with convenient generation API.
Args:
name (str): name of the model.
compression_model (CompressionModel): Compression model
used to map audio to invertible discrete representations.
lm (LMModel): Language model over discrete representations.
max_duration (float, optional): maximum duration the model can produce,
otherwise, inferred from the training params.
"""
def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,
max_duration: tp.Optional[float] = None):
self.name = name
self.compression_model = compression_model
self.lm = lm
if max_duration is None:
if hasattr(lm, 'cfg'):
max_duration = lm.cfg.dataset.segment_duration # type: ignore
else:
raise ValueError("You must provide max_duration when building directly MusicGen")
assert max_duration is not None
self.max_duration: float = max_duration
self.device = next(iter(lm.parameters())).device
self.generation_params: dict = {}
self.set_generation_params(duration=15) # 15 seconds by default
self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None
if self.device.type == 'cpu':
self.autocast = TorchAutocast(enabled=False)
else:
self.autocast = TorchAutocast(
enabled=True, device_type=self.device.type, dtype=torch.float16)
@property
def frame_rate(self) -> float:
"""Roughly the number of AR steps per seconds."""
return self.compression_model.frame_rate
@property
def sample_rate(self) -> int:
"""Sample rate of the generated audio."""
return self.compression_model.sample_rate
@property
def audio_channels(self) -> int:
"""Audio channels of the generated audio."""
return self.compression_model.channels
@staticmethod
def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):
"""Return pretrained model, we provide four models:
- facebook/musicgen-small (300M), text to music,
# see: https://huggingface.co/facebook/musicgen-small
- facebook/musicgen-medium (1.5B), text to music,
# see: https://huggingface.co/facebook/musicgen-medium
- facebook/musicgen-melody (1.5B) text to music and text+melody to music,
# see: https://huggingface.co/facebook/musicgen-melody
- facebook/musicgen-large (3.3B), text to music,
# see: https://huggingface.co/facebook/musicgen-large
"""
if device is None:
if torch.cuda.device_count():
device = 'cuda'
else:
device = 'cpu'
if name == 'debug':
# used only for unit tests
compression_model = get_debug_compression_model(device)
lm = get_debug_lm_model(device)
return MusicGen(name, compression_model, lm, max_duration=30)
if name in _HF_MODEL_CHECKPOINTS_MAP:
warnings.warn(
"MusicGen pretrained model relying on deprecated checkpoint mapping. " +
f"Please use full pre-trained id instead: facebook/musicgen-{name}")
name = _HF_MODEL_CHECKPOINTS_MAP[name]
lm = load_lm_model(name, device=device)
compression_model = load_compression_model(name, device=device)
if 'self_wav' in lm.condition_provider.conditioners:
lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True
return MusicGen(name, compression_model, lm)
def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,
top_p: float = 0.0, temperature: float = 1.0,
duration: float = 30.0, cfg_coef: float = 3.0,
two_step_cfg: bool = False, extend_stride: float = 18):
"""Set the generation parameters for MusicGen.
Args:
use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.
top_k (int, optional): top_k used for sampling. Defaults to 250.
top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.
temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.
duration (float, optional): Duration of the generated waveform. Defaults to 30.0.
cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.
two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,
instead of batching together the two. This has some impact on how things
are padded but seems to have little impact in practice.
extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much
should we extend the audio each time. Larger values will mean less context is
preserved, and shorter value will require extra computations.
"""
assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration."
self.extend_stride = extend_stride
self.duration = duration
self.generation_params = {
'use_sampling': use_sampling,
'temp': temperature,
'top_k': top_k,
'top_p': top_p,
'cfg_coef': cfg_coef,
'two_step_cfg': two_step_cfg,
}
def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):
"""Override the default progress callback."""
self._progress_callback = progress_callback
def generate_unconditional(self, num_samples: int, progress: bool = False,
return_tokens: bool = False) -> tp.Union[torch.Tensor,
tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples in an unconditional manner.
Args:
num_samples (int): Number of samples to be generated.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
descriptions: tp.List[tp.Optional[str]] = [None] * num_samples
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \
-> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples conditioned on text.
Args:
descriptions (list of str): A list of strings used as text conditioning.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
assert prompt_tokens is None
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,
melody_sample_rate: int, progress: bool = False,
return_tokens: bool = False) -> tp.Union[torch.Tensor,
tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples conditioned on text and melody.
Args:
descriptions (list of str): A list of strings used as text conditioning.
melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as
melody conditioning. Should have shape [B, C, T] with B matching the description length,
C=1 or 2. It can be [C, T] if there is a single description. It can also be
a list of [C, T] tensors.
melody_sample_rate: (int): Sample rate of the melody waveforms.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
if isinstance(melody_wavs, torch.Tensor):
if melody_wavs.dim() == 2:
melody_wavs = melody_wavs[None]
if melody_wavs.dim() != 3:
raise ValueError("Melody wavs should have a shape [B, C, T].")
melody_wavs = list(melody_wavs)
else:
for melody in melody_wavs:
if melody is not None:
assert melody.dim() == 2, "One melody in the list has the wrong number of dims."
melody_wavs = [
convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)
if wav is not None else None
for wav in melody_wavs]
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,
melody_wavs=melody_wavs)
assert prompt_tokens is None
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,
descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,
progress: bool = False, return_tokens: bool = False) \
-> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:
"""Generate samples conditioned on audio prompts.
Args:
prompt (torch.Tensor): A batch of waveforms used for continuation.
Prompt should be [B, C, T], or [C, T] if only one sample is generated.
prompt_sample_rate (int): Sampling rate of the given audio waveforms.
descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
if prompt.dim() == 2:
prompt = prompt[None]
if prompt.dim() != 3:
raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).")
prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)
if descriptions is None:
descriptions = [None] * len(prompt)
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)
assert prompt_tokens is not None
tokens = self._generate_tokens(attributes, prompt_tokens, progress)
if return_tokens:
return self.generate_audio(tokens), tokens
return self.generate_audio(tokens)
@torch.no_grad()
def _prepare_tokens_and_attributes(
self,
descriptions: tp.Sequence[tp.Optional[str]],
prompt: tp.Optional[torch.Tensor],
melody_wavs: tp.Optional[MelodyList] = None,
) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:
"""Prepare model inputs.
Args:
descriptions (list of str): A list of strings used as text conditioning.
prompt (torch.Tensor): A batch of waveforms used for continuation.
melody_wavs (torch.Tensor, optional): A batch of waveforms
used as melody conditioning. Defaults to None.
"""
attributes = [
ConditioningAttributes(text={'description': description})
for description in descriptions]
if melody_wavs is None:
for attr in attributes:
attr.wav['self_wav'] = WavCondition(
torch.zeros((1, 1, 1), device=self.device),
torch.tensor([0], device=self.device),
sample_rate=[self.sample_rate],
path=[None])
else:
if 'self_wav' not in self.lm.condition_provider.conditioners:
raise RuntimeError("This model doesn't support melody conditioning. "
"Use the `melody` model.")
assert len(melody_wavs) == len(descriptions), \
f"number of melody wavs must match number of descriptions! " \
f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}"
for attr, melody in zip(attributes, melody_wavs):
if melody is None:
attr.wav['self_wav'] = WavCondition(
torch.zeros((1, 1, 1), device=self.device),
torch.tensor([0], device=self.device),
sample_rate=[self.sample_rate],
path=[None])
else:
attr.wav['self_wav'] = WavCondition(
melody[None].to(device=self.device),
torch.tensor([melody.shape[-1]], device=self.device),
sample_rate=[self.sample_rate],
path=[None],
)
if prompt is not None:
if descriptions is not None:
assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match"
prompt = prompt.to(self.device)
prompt_tokens, scale = self.compression_model.encode(prompt)
assert scale is None
else:
prompt_tokens = None
return attributes, prompt_tokens
def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],
prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:
"""Generate discrete audio tokens given audio prompt and/or conditions.
Args:
attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).
prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
Returns:
torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.
"""
total_gen_len = int(self.duration * self.frame_rate)
max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
current_gen_offset: int = 0
def _progress_callback(generated_tokens: int, tokens_to_generate: int):
generated_tokens += current_gen_offset
if self._progress_callback is not None:
# Note that total_gen_len might be quite wrong depending on the
# codebook pattern used, but with delay it is almost accurate.
self._progress_callback(generated_tokens, total_gen_len)
else:
print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r')
if prompt_tokens is not None:
assert max_prompt_len >= prompt_tokens.shape[-1], \
"Prompt is longer than audio to generate"
callback = None
if progress:
callback = _progress_callback
if self.duration <= self.max_duration:
# generate by sampling from LM, simple case.
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=total_gen_len, **self.generation_params)
else:
# now this gets a bit messier, we need to handle prompts,
# melody conditioning etc.
ref_wavs = [attr.wav['self_wav'] for attr in attributes]
all_tokens = []
if prompt_tokens is None:
prompt_length = 0
else:
all_tokens.append(prompt_tokens)
prompt_length = prompt_tokens.shape[-1]
stride_tokens = int(self.frame_rate * self.extend_stride)
while current_gen_offset + prompt_length < total_gen_len:
time_offset = current_gen_offset / self.frame_rate
chunk_duration = min(self.duration - time_offset, self.max_duration)
max_gen_len = int(chunk_duration * self.frame_rate)
for attr, ref_wav in zip(attributes, ref_wavs):
wav_length = ref_wav.length.item()
if wav_length == 0:
continue
# We will extend the wav periodically if it not long enough.
# we have to do it here rather than in conditioners.py as otherwise
# we wouldn't have the full wav.
initial_position = int(time_offset * self.sample_rate)
wav_target_length = int(self.max_duration * self.sample_rate)
positions = torch.arange(initial_position,
initial_position + wav_target_length, device=self.device)
attr.wav['self_wav'] = WavCondition(
ref_wav[0][..., positions % wav_length],
torch.full_like(ref_wav[1], wav_target_length),
[self.sample_rate] * ref_wav[0].size(0),
[None], [0.])
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=max_gen_len, **self.generation_params)
if prompt_tokens is None:
all_tokens.append(gen_tokens)
else:
all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])
prompt_tokens = gen_tokens[:, :, stride_tokens:]
prompt_length = prompt_tokens.shape[-1]
current_gen_offset += stride_tokens
gen_tokens = torch.cat(all_tokens, dim=-1)
return gen_tokens
def generate_audio(self, gen_tokens: torch.Tensor):
"""Generate Audio from tokens"""
assert gen_tokens.dim() == 3
with torch.no_grad():
gen_audio = self.compression_model.decode(gen_tokens, None)
return gen_audio
| audiocraft-main | audiocraft/models/musicgen.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Pytorch Unet Module used for diffusion.
"""
from dataclasses import dataclass
import typing as tp
import torch
from torch import nn
from torch.nn import functional as F
from audiocraft.modules.transformer import StreamingTransformer, create_sin_embedding
@dataclass
class Output:
sample: torch.Tensor
def get_model(cfg, channels: int, side: int, num_steps: int):
if cfg.model == 'unet':
return DiffusionUnet(
chin=channels, num_steps=num_steps, **cfg.diffusion_unet)
else:
raise RuntimeError('Not Implemented')
class ResBlock(nn.Module):
def __init__(self, channels: int, kernel: int = 3, norm_groups: int = 4,
dilation: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
dropout: float = 0.):
super().__init__()
stride = 1
padding = dilation * (kernel - stride) // 2
Conv = nn.Conv1d
Drop = nn.Dropout1d
self.norm1 = nn.GroupNorm(norm_groups, channels)
self.conv1 = Conv(channels, channels, kernel, 1, padding, dilation=dilation)
self.activation1 = activation()
self.dropout1 = Drop(dropout)
self.norm2 = nn.GroupNorm(norm_groups, channels)
self.conv2 = Conv(channels, channels, kernel, 1, padding, dilation=dilation)
self.activation2 = activation()
self.dropout2 = Drop(dropout)
def forward(self, x):
h = self.dropout1(self.conv1(self.activation1(self.norm1(x))))
h = self.dropout2(self.conv2(self.activation2(self.norm2(h))))
return x + h
class DecoderLayer(nn.Module):
def __init__(self, chin: int, chout: int, kernel: int = 4, stride: int = 2,
norm_groups: int = 4, res_blocks: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
dropout: float = 0.):
super().__init__()
padding = (kernel - stride) // 2
self.res_blocks = nn.Sequential(
*[ResBlock(chin, norm_groups=norm_groups, dilation=2**idx, dropout=dropout)
for idx in range(res_blocks)])
self.norm = nn.GroupNorm(norm_groups, chin)
ConvTr = nn.ConvTranspose1d
self.convtr = ConvTr(chin, chout, kernel, stride, padding, bias=False)
self.activation = activation()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.res_blocks(x)
x = self.norm(x)
x = self.activation(x)
x = self.convtr(x)
return x
class EncoderLayer(nn.Module):
def __init__(self, chin: int, chout: int, kernel: int = 4, stride: int = 2,
norm_groups: int = 4, res_blocks: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
dropout: float = 0.):
super().__init__()
padding = (kernel - stride) // 2
Conv = nn.Conv1d
self.conv = Conv(chin, chout, kernel, stride, padding, bias=False)
self.norm = nn.GroupNorm(norm_groups, chout)
self.activation = activation()
self.res_blocks = nn.Sequential(
*[ResBlock(chout, norm_groups=norm_groups, dilation=2**idx, dropout=dropout)
for idx in range(res_blocks)])
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, T = x.shape
stride, = self.conv.stride
pad = (stride - (T % stride)) % stride
x = F.pad(x, (0, pad))
x = self.conv(x)
x = self.norm(x)
x = self.activation(x)
x = self.res_blocks(x)
return x
class BLSTM(nn.Module):
"""BiLSTM with same hidden units as input dim.
"""
def __init__(self, dim, layers=2):
super().__init__()
self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim)
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x):
x = x.permute(2, 0, 1)
x = self.lstm(x)[0]
x = self.linear(x)
x = x.permute(1, 2, 0)
return x
class DiffusionUnet(nn.Module):
def __init__(self, chin: int = 3, hidden: int = 24, depth: int = 3, growth: float = 2.,
max_channels: int = 10_000, num_steps: int = 1000, emb_all_layers=False, cross_attention: bool = False,
bilstm: bool = False, transformer: bool = False,
codec_dim: tp.Optional[int] = None, **kwargs):
super().__init__()
self.encoders = nn.ModuleList()
self.decoders = nn.ModuleList()
self.embeddings: tp.Optional[nn.ModuleList] = None
self.embedding = nn.Embedding(num_steps, hidden)
if emb_all_layers:
self.embeddings = nn.ModuleList()
self.condition_embedding: tp.Optional[nn.Module] = None
for d in range(depth):
encoder = EncoderLayer(chin, hidden, **kwargs)
decoder = DecoderLayer(hidden, chin, **kwargs)
self.encoders.append(encoder)
self.decoders.insert(0, decoder)
if emb_all_layers and d > 0:
assert self.embeddings is not None
self.embeddings.append(nn.Embedding(num_steps, hidden))
chin = hidden
hidden = min(int(chin * growth), max_channels)
self.bilstm: tp.Optional[nn.Module]
if bilstm:
self.bilstm = BLSTM(chin)
else:
self.bilstm = None
self.use_transformer = transformer
self.cross_attention = False
if transformer:
self.cross_attention = cross_attention
self.transformer = StreamingTransformer(chin, 8, 6, bias_ff=False, bias_attn=False,
cross_attention=cross_attention)
self.use_codec = False
if codec_dim is not None:
self.conv_codec = nn.Conv1d(codec_dim, chin, 1)
self.use_codec = True
def forward(self, x: torch.Tensor, step: tp.Union[int, torch.Tensor], condition: tp.Optional[torch.Tensor] = None):
skips = []
bs = x.size(0)
z = x
view_args = [1]
if type(step) is torch.Tensor:
step_tensor = step
else:
step_tensor = torch.tensor([step], device=x.device, dtype=torch.long).expand(bs)
for idx, encoder in enumerate(self.encoders):
z = encoder(z)
if idx == 0:
z = z + self.embedding(step_tensor).view(bs, -1, *view_args).expand_as(z)
elif self.embeddings is not None:
z = z + self.embeddings[idx - 1](step_tensor).view(bs, -1, *view_args).expand_as(z)
skips.append(z)
if self.use_codec: # insert condition in the bottleneck
assert condition is not None, "Model defined for conditionnal generation"
condition_emb = self.conv_codec(condition) # reshape to the bottleneck dim
assert condition_emb.size(-1) <= 2 * z.size(-1), \
f"You are downsampling the conditionning with factor >=2 : {condition_emb.size(-1)=} and {z.size(-1)=}"
if not self.cross_attention:
condition_emb = torch.nn.functional.interpolate(condition_emb, z.size(-1))
assert z.size() == condition_emb.size()
z += condition_emb
cross_attention_src = None
else:
cross_attention_src = condition_emb.permute(0, 2, 1) # B, T, C
B, T, C = cross_attention_src.shape
positions = torch.arange(T, device=x.device).view(1, -1, 1)
pos_emb = create_sin_embedding(positions, C, max_period=10_000, dtype=cross_attention_src.dtype)
cross_attention_src = cross_attention_src + pos_emb
if self.use_transformer:
z = self.transformer(z.permute(0, 2, 1), cross_attention_src=cross_attention_src).permute(0, 2, 1)
else:
if self.bilstm is None:
z = torch.zeros_like(z)
else:
z = self.bilstm(z)
for decoder in self.decoders:
s = skips.pop(-1)
z = z[:, :, :s.shape[2]]
z = z + s
z = decoder(z)
z = z[:, :, :x.shape[2]]
return Output(z)
| audiocraft-main | audiocraft/models/unet.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility functions to load from the checkpoints.
Each checkpoint is a torch.saved dict with the following keys:
- 'xp.cfg': the hydra config as dumped during training. This should be used
to rebuild the object using the audiocraft.models.builders functions,
- 'model_best_state': a readily loadable best state for the model, including
the conditioner. The model obtained from `xp.cfg` should be compatible
with this state dict. In the case of a LM, the encodec model would not be
bundled along but instead provided separately.
Those functions also support loading from a remote location with the Torch Hub API.
They also support overriding some parameters, in particular the device and dtype
of the returned model.
"""
from pathlib import Path
from huggingface_hub import hf_hub_download
import typing as tp
import os
from omegaconf import OmegaConf, DictConfig
import torch
from . import builders
from .encodec import CompressionModel
def get_audiocraft_cache_dir() -> tp.Optional[str]:
return os.environ.get('AUDIOCRAFT_CACHE_DIR', None)
def _get_state_dict(
file_or_url_or_id: tp.Union[Path, str],
filename: tp.Optional[str] = None,
device='cpu',
cache_dir: tp.Optional[str] = None,
):
if cache_dir is None:
cache_dir = get_audiocraft_cache_dir()
# Return the state dict either from a file or url
file_or_url_or_id = str(file_or_url_or_id)
assert isinstance(file_or_url_or_id, str)
if os.path.isfile(file_or_url_or_id):
return torch.load(file_or_url_or_id, map_location=device)
if os.path.isdir(file_or_url_or_id):
file = f"{file_or_url_or_id}/{filename}"
return torch.load(file, map_location=device)
elif file_or_url_or_id.startswith('https://'):
return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True)
else:
assert filename is not None, "filename needs to be defined if using HF checkpoints"
file = hf_hub_download(repo_id=file_or_url_or_id, filename=filename, cache_dir=cache_dir)
return torch.load(file, map_location=device)
def load_compression_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
return _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir)
def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
if 'pretrained' in pkg:
return CompressionModel.get_pretrained(pkg['pretrained'], device=device)
cfg = OmegaConf.create(pkg['xp.cfg'])
cfg.device = str(device)
model = builders.get_compression_model(cfg)
model.load_state_dict(pkg['best_state'])
model.eval()
return model
def load_lm_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
return _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir)
def _delete_param(cfg: DictConfig, full_name: str):
parts = full_name.split('.')
for part in parts[:-1]:
if part in cfg:
cfg = cfg[part]
else:
return
OmegaConf.set_struct(cfg, False)
if parts[-1] in cfg:
del cfg[parts[-1]]
OmegaConf.set_struct(cfg, True)
def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
cfg = OmegaConf.create(pkg['xp.cfg'])
cfg.device = str(device)
if cfg.device == 'cpu':
cfg.dtype = 'float32'
else:
cfg.dtype = 'float16'
_delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
_delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
_delete_param(cfg, 'conditioners.args.drop_desc_p')
model = builders.get_lm_model(cfg)
model.load_state_dict(pkg['best_state'])
model.eval()
model.cfg = cfg
return model
def load_mbd_ckpt(file_or_url_or_id: tp.Union[Path, str],
filename: tp.Optional[str] = None,
cache_dir: tp.Optional[str] = None):
return _get_state_dict(file_or_url_or_id, filename=filename, cache_dir=cache_dir)
def load_diffusion_models(file_or_url_or_id: tp.Union[Path, str],
device='cpu',
filename: tp.Optional[str] = None,
cache_dir: tp.Optional[str] = None):
pkg = load_mbd_ckpt(file_or_url_or_id, filename=filename, cache_dir=cache_dir)
models = []
processors = []
cfgs = []
sample_rate = pkg['sample_rate']
for i in range(pkg['n_bands']):
cfg = pkg[i]['cfg']
model = builders.get_diffusion_model(cfg)
model_dict = pkg[i]['model_state']
model.load_state_dict(model_dict)
model.to(device)
processor = builders.get_processor(cfg=cfg.processor, sample_rate=sample_rate)
processor_dict = pkg[i]['processor_state']
processor.load_state_dict(processor_dict)
processor.to(device)
models.append(model)
processors.append(processor)
cfgs.append(cfg)
return models, processors, cfgs
| audiocraft-main | audiocraft/models/loaders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Models for EnCodec, AudioGen, MusicGen, as well as the generic LMModel.
"""
# flake8: noqa
from . import builders, loaders
from .encodec import (
CompressionModel, EncodecModel, DAC,
HFEncodecModel, HFEncodecCompressionModel)
from .audiogen import AudioGen
from .lm import LMModel
from .multibanddiffusion import MultiBandDiffusion
from .musicgen import MusicGen
from .unet import DiffusionUnet
| audiocraft-main | audiocraft/models/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Multi Band Diffusion models as described in
"From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion"
(paper link).
"""
import typing as tp
import torch
import julius
from .unet import DiffusionUnet
from ..modules.diffusion_schedule import NoiseSchedule
from .encodec import CompressionModel
from ..solvers.compression import CompressionSolver
from .loaders import load_compression_model, load_diffusion_models
class DiffusionProcess:
"""Sampling for a diffusion Model.
Args:
model (DiffusionUnet): Diffusion U-Net model.
noise_schedule (NoiseSchedule): Noise schedule for diffusion process.
"""
def __init__(self, model: DiffusionUnet, noise_schedule: NoiseSchedule) -> None:
"""
"""
self.model = model
self.schedule = noise_schedule
def generate(self, condition: torch.Tensor, initial_noise: torch.Tensor,
step_list: tp.Optional[tp.List[int]] = None):
"""Perform one diffusion process to generate one of the bands.
Args:
condition (tensor): The embeddings form the compression model.
initial_noise (tensor): The initial noise to start the process/
"""
return self.schedule.generate_subsampled(model=self.model, initial=initial_noise, step_list=step_list,
condition=condition)
class MultiBandDiffusion:
"""Sample from multiple diffusion models.
Args:
DPs (list of DiffusionProcess): Diffusion processes.
codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.
"""
def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:
self.DPs = DPs
self.codec_model = codec_model
self.device = next(self.codec_model.parameters()).device
@property
def sample_rate(self) -> int:
return self.codec_model.sample_rate
@staticmethod
def get_mbd_musicgen(device=None):
"""Load our diffusion models trained for MusicGen."""
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
path = 'facebook/multiband-diffusion'
filename = 'mbd_musicgen_32khz.th'
name = 'facebook/musicgen-small'
codec_model = load_compression_model(name, device=device)
models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)
DPs = []
for i in range(len(models)):
schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)
DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))
return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)
@staticmethod
def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,
device: tp.Optional[tp.Union[torch.device, str]] = None,
n_q: tp.Optional[int] = None):
"""Get the pretrained Models for MultibandDiffusion.
Args:
bw (float): Bandwidth of the compression model.
pretrained (bool): Whether to use / download if necessary the models.
device (torch.device or str, optional): Device on which the models are loaded.
n_q (int, optional): Number of quantizers to use within the compression model.
"""
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
assert bw in [1.5, 3.0, 6.0], f"bandwidth {bw} not available"
if n_q is not None:
assert n_q in [2, 4, 8]
assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \
f"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}"
n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]
codec_model = CompressionSolver.model_from_checkpoint(
'//pretrained/facebook/encodec_24khz', device=device)
codec_model.set_num_codebooks(n_q)
codec_model = codec_model.to(device)
path = 'facebook/multiband-diffusion'
filename = f'mbd_comp_{n_q}.pt'
models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)
DPs = []
for i in range(len(models)):
schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)
DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))
return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)
return MultiBandDiffusion(DPs, codec_model)
@torch.no_grad()
def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:
"""Get the conditioning (i.e. latent reprentatios of the compression model) from a waveform.
Args:
wav (torch.Tensor): The audio that we want to extract the conditioning from
sample_rate (int): sample rate of the audio"""
if sample_rate != self.sample_rate:
wav = julius.resample_frac(wav, sample_rate, self.sample_rate)
codes, scale = self.codec_model.encode(wav)
assert scale is None, "Scaled compression models not supported."
emb = self.get_emb(codes)
return emb
@torch.no_grad()
def get_emb(self, codes: torch.Tensor):
"""Get latent representation from the discrete codes
Argrs:
codes (torch.Tensor): discrete tokens"""
emb = self.codec_model.decode_latent(codes)
return emb
def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,
step_list: tp.Optional[tp.List[int]] = None):
"""Generate Wavform audio from the latent embeddings of the compression model
Args:
emb (torch.Tensor): Conditioning embeddinds
size (none torch.Size): size of the output
if None this is computed from the typical upsampling of the model
step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.
"""
if size is None:
upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)
size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])
assert size[0] == emb.size(0)
out = torch.zeros(size).to(self.device)
for DP in self.DPs:
out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))
return out
def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):
"""match the eq to the encodec output by matching the standard deviation of some frequency bands
Args:
wav (torch.Tensor): audio to equalize
ref (torch.Tensor):refenrence audio from which we match the spectrogram.
n_bands (int): number of bands of the eq
strictness (float): how strict the the matching. 0 is no matching, 1 is exact matching.
"""
split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)
bands = split(wav)
bands_ref = split(ref)
out = torch.zeros_like(ref)
for i in range(n_bands):
out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness
return out
def regenerate(self, wav: torch.Tensor, sample_rate: int):
"""Regenerate a wavform through compression and diffusion regeneration.
Args:
wav (torch.Tensor): Original 'ground truth' audio
sample_rate (int): sample rate of the input (and output) wav
"""
if sample_rate != self.codec_model.sample_rate:
wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)
emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)
size = wav.size()
out = self.generate(emb, size=size)
if sample_rate != self.codec_model.sample_rate:
out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)
return out
def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):
"""Generate Waveform audio with diffusion from the discrete codes.
Args:
tokens (torch.Tensor): discrete codes
n_bands (int): bands for the eq matching.
"""
wav_encodec = self.codec_model.decode(tokens)
condition = self.get_emb(tokens)
wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())
return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)
| audiocraft-main | audiocraft/models/multibanddiffusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Main model for using AudioGen. This will combine all the required components
and provide easy access to the generation API.
"""
import typing as tp
import torch
from .encodec import CompressionModel
from .lm import LMModel
from .builders import get_debug_compression_model, get_debug_lm_model
from .loaders import load_compression_model, load_lm_model
from ..data.audio_utils import convert_audio
from ..modules.conditioners import ConditioningAttributes
from ..utils.autocast import TorchAutocast
class AudioGen:
"""AudioGen main model with convenient generation API.
Args:
name (str): name of the model.
compression_model (CompressionModel): Compression model
used to map audio to invertible discrete representations.
lm (LMModel): Language model over discrete representations.
max_duration (float, optional): maximum duration the model can produce,
otherwise, inferred from the training params.
"""
def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,
max_duration: tp.Optional[float] = None):
self.name = name
self.compression_model = compression_model
self.lm = lm
if max_duration is None:
if hasattr(lm, 'cfg'):
max_duration = lm.cfg.dataset.segment_duration # type: ignore
else:
raise ValueError("You must provide max_duration when building directly AudioGen")
assert max_duration is not None
self.max_duration: float = max_duration
self.device = next(iter(lm.parameters())).device
self.generation_params: dict = {}
self.set_generation_params(duration=5) # 5 seconds by default
self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None
if self.device.type == 'cpu':
self.autocast = TorchAutocast(enabled=False)
else:
self.autocast = TorchAutocast(
enabled=True, device_type=self.device.type, dtype=torch.float16)
@property
def frame_rate(self) -> float:
"""Roughly the number of AR steps per seconds."""
return self.compression_model.frame_rate
@property
def sample_rate(self) -> int:
"""Sample rate of the generated audio."""
return self.compression_model.sample_rate
@property
def audio_channels(self) -> int:
"""Audio channels of the generated audio."""
return self.compression_model.channels
@staticmethod
def get_pretrained(name: str = 'facebook/audiogen-medium', device=None):
"""Return pretrained model, we provide a single model for now:
- facebook/audiogen-medium (1.5B), text to sound,
# see: https://huggingface.co/facebook/audiogen-medium
"""
if device is None:
if torch.cuda.device_count():
device = 'cuda'
else:
device = 'cpu'
if name == 'debug':
# used only for unit tests
compression_model = get_debug_compression_model(device, sample_rate=16000)
lm = get_debug_lm_model(device)
return AudioGen(name, compression_model, lm, max_duration=10)
compression_model = load_compression_model(name, device=device)
lm = load_lm_model(name, device=device)
assert 'self_wav' not in lm.condition_provider.conditioners, \
"AudioGen do not support waveform conditioning for now"
return AudioGen(name, compression_model, lm)
def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,
top_p: float = 0.0, temperature: float = 1.0,
duration: float = 10.0, cfg_coef: float = 3.0,
two_step_cfg: bool = False, extend_stride: float = 2):
"""Set the generation parameters for AudioGen.
Args:
use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.
top_k (int, optional): top_k used for sampling. Defaults to 250.
top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.
temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.
duration (float, optional): Duration of the generated waveform. Defaults to 10.0.
cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.
two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,
instead of batching together the two. This has some impact on how things
are padded but seems to have little impact in practice.
extend_stride: when doing extended generation (i.e. more than 10 seconds), by how much
should we extend the audio each time. Larger values will mean less context is
preserved, and shorter value will require extra computations.
"""
assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration."
self.extend_stride = extend_stride
self.duration = duration
self.generation_params = {
'use_sampling': use_sampling,
'temp': temperature,
'top_k': top_k,
'top_p': top_p,
'cfg_coef': cfg_coef,
'two_step_cfg': two_step_cfg,
}
def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):
"""Override the default progress callback."""
self._progress_callback = progress_callback
def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor:
"""Generate samples conditioned on text.
Args:
descriptions (list of str): A list of strings used as text conditioning.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)
assert prompt_tokens is None
return self._generate_tokens(attributes, prompt_tokens, progress)
def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,
descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,
progress: bool = False) -> torch.Tensor:
"""Generate samples conditioned on audio prompts.
Args:
prompt (torch.Tensor): A batch of waveforms used for continuation.
Prompt should be [B, C, T], or [C, T] if only one sample is generated.
prompt_sample_rate (int): Sampling rate of the given audio waveforms.
descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
"""
if prompt.dim() == 2:
prompt = prompt[None]
if prompt.dim() != 3:
raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).")
prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)
if descriptions is None:
descriptions = [None] * len(prompt)
attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)
assert prompt_tokens is not None
return self._generate_tokens(attributes, prompt_tokens, progress)
@torch.no_grad()
def _prepare_tokens_and_attributes(
self,
descriptions: tp.Sequence[tp.Optional[str]],
prompt: tp.Optional[torch.Tensor],
) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:
"""Prepare model inputs.
Args:
descriptions (list of str): A list of strings used as text conditioning.
prompt (torch.Tensor): A batch of waveforms used for continuation.
"""
attributes = [
ConditioningAttributes(text={'description': description})
for description in descriptions]
if prompt is not None:
if descriptions is not None:
assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match"
prompt = prompt.to(self.device)
prompt_tokens, scale = self.compression_model.encode(prompt)
assert scale is None
else:
prompt_tokens = None
return attributes, prompt_tokens
def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],
prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:
"""Generate discrete audio tokens given audio prompt and/or conditions.
Args:
attributes (list of ConditioningAttributes): Conditions used for generation (here text).
prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.
progress (bool, optional): Flag to display progress of the generation process. Defaults to False.
Returns:
torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.
"""
total_gen_len = int(self.duration * self.frame_rate)
max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
current_gen_offset: int = 0
def _progress_callback(generated_tokens: int, tokens_to_generate: int):
generated_tokens += current_gen_offset
if self._progress_callback is not None:
# Note that total_gen_len might be quite wrong depending on the
# codebook pattern used, but with delay it is almost accurate.
self._progress_callback(generated_tokens, total_gen_len)
else:
print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r')
if prompt_tokens is not None:
assert max_prompt_len >= prompt_tokens.shape[-1], \
"Prompt is longer than audio to generate"
callback = None
if progress:
callback = _progress_callback
if self.duration <= self.max_duration:
# generate by sampling from LM, simple case.
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=total_gen_len, **self.generation_params)
else:
all_tokens = []
if prompt_tokens is None:
prompt_length = 0
else:
all_tokens.append(prompt_tokens)
prompt_length = prompt_tokens.shape[-1]
stride_tokens = int(self.frame_rate * self.extend_stride)
while current_gen_offset + prompt_length < total_gen_len:
time_offset = current_gen_offset / self.frame_rate
chunk_duration = min(self.duration - time_offset, self.max_duration)
max_gen_len = int(chunk_duration * self.frame_rate)
with self.autocast:
gen_tokens = self.lm.generate(
prompt_tokens, attributes,
callback=callback, max_gen_len=max_gen_len, **self.generation_params)
if prompt_tokens is None:
all_tokens.append(gen_tokens)
else:
all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])
prompt_tokens = gen_tokens[:, :, stride_tokens:]
prompt_length = prompt_tokens.shape[-1]
current_gen_offset += stride_tokens
gen_tokens = torch.cat(all_tokens, dim=-1)
# generate audio
assert gen_tokens.dim() == 3
with torch.no_grad():
gen_audio = self.compression_model.decode(gen_tokens, None)
return gen_audio
| audiocraft-main | audiocraft/models/audiogen.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from functools import partial
import logging
import math
import typing as tp
import torch
from torch import nn
from ..utils import utils
from ..modules.streaming import StreamingModule, State
from ..modules.transformer import StreamingTransformer, create_norm_fn
from ..modules.conditioners import (
ConditionFuser,
ClassifierFreeGuidanceDropout,
AttributeDropout,
ConditioningProvider,
ConditioningAttributes,
ConditionType,
)
from ..modules.codebooks_patterns import CodebooksPatternProvider
from ..modules.activations import get_activation_fn
logger = logging.getLogger(__name__)
ConditionTensors = tp.Dict[str, ConditionType]
CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]]
def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
"""LM layer initialization.
Inspired from xlformers: https://github.com/fairinternal/xlformers
Args:
method (str): Method name for init function. Valid options are:
'gaussian', 'uniform'.
input_dim (int): Input dimension of the initialized module.
init_depth (int, optional): Optional init depth value used to rescale
the standard deviation if defined.
"""
# Compute std
std = 1 / math.sqrt(input_dim)
# Rescale with depth
if init_depth is not None:
std = std / math.sqrt(2 * init_depth)
if method == 'gaussian':
return partial(
torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
)
elif method == 'uniform':
bound = math.sqrt(3) * std # ensure the standard deviation is `std`
return partial(torch.nn.init.uniform_, a=-bound, b=bound)
else:
raise ValueError("Unsupported layer initialization method")
def init_layer(m: nn.Module,
method: str,
init_depth: tp.Optional[int] = None,
zero_bias_init: bool = False):
"""Wrapper around ``get_init_fn`` for proper initialization of LM modules.
Args:
m (nn.Module): Module to initialize.
method (str): Method name for the init function.
init_depth (int, optional): Optional init depth value used to rescale
the standard deviation if defined.
zero_bias_init (bool): Whether to initialize the bias to 0 or not.
"""
if isinstance(m, nn.Linear):
init_fn = get_init_fn(method, m.in_features, init_depth=init_depth)
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
weight = m.weight.float()
init_fn(weight)
m.weight.data[:] = weight.half()
else:
init_fn(m.weight)
if zero_bias_init and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Embedding):
init_fn = get_init_fn(method, m.embedding_dim, init_depth=None)
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
weight = m.weight.float()
init_fn(weight)
m.weight.data[:] = weight.half()
else:
init_fn(m.weight)
class ScaledEmbedding(nn.Embedding):
"""Boost learning rate for embeddings (with `scale`).
"""
def __init__(self, *args, lr=None, **kwargs):
super().__init__(*args, **kwargs)
self.lr = lr
def make_optim_group(self):
group = {"params": list(self.parameters())}
if self.lr is not None:
group["lr"] = self.lr
return group
@dataclass
class LMOutput:
# The logits are already re-aligned with the input codes
# hence no extra shift is required, e.g. when computing CE
logits: torch.Tensor # [B, K, T, card]
mask: torch.Tensor # [B, K, T]
class LMModel(StreamingModule):
"""Transformer-based language model on multiple streams of codes.
Args:
pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving.
condition_provider (MusicConditioningProvider): Conditioning provider from metadata.
fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input.
n_q (int): Number of parallel streams to model.
card (int): Cardinality, vocabulary size.
dim (int): Dimension of the transformer encoder.
num_heads (int): Number of heads for the transformer encoder.
hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder.
norm (str): Normalization method.
norm_first (bool): Use pre-norm instead of post-norm.
emb_lr (float, optional): Embedding-specific learning rate.
bias_proj (bool): Use bias for output projections.
weight_init (str, optional): Method for weight initialization.
depthwise_init (str, optional): Method for depthwise weight initialization.
zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros.
cfg_dropout (float): Classifier-free guidance dropout.
cfg_coef (float): Classifier-free guidance coefficient.
attribute_dropout (dict): Attribute dropout probabilities.
two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps.
**kwargs: Additional parameters for the transformer encoder.
"""
def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider,
fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8,
hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False,
emb_lr: tp.Optional[float] = None, bias_proj: bool = True,
weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None,
zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0,
attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False,
**kwargs):
super().__init__()
self.cfg_coef = cfg_coef
self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout)
self.att_dropout = AttributeDropout(p=attribute_dropout)
self.condition_provider = condition_provider
self.fuser = fuser
self.card = card
embed_dim = self.card + 1
self.n_q = n_q
self.dim = dim
self.pattern_provider = pattern_provider
self.two_step_cfg = two_step_cfg
self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)])
if 'activation' in kwargs:
kwargs['activation'] = get_activation_fn(kwargs['activation'])
self.transformer = StreamingTransformer(
d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim),
norm=norm, norm_first=norm_first, **kwargs)
self.out_norm: tp.Optional[nn.Module] = None
if norm_first:
self.out_norm = create_norm_fn(norm, dim)
self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)])
self._init_weights(weight_init, depthwise_init, zero_bias_init)
self._fsdp: tp.Optional[nn.Module]
self.__dict__['_fsdp'] = None
def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool):
"""Initialization of the transformer module weights.
Args:
weight_init (str, optional): Weight initialization strategy. See ``get_init_fn`` for valid options.
depthwise_init (str, optional): Depthwise initialization strategy. The following options are valid:
'current' where the depth corresponds to the current layer index or 'global' where the total number
of layer is used as depth. If not set, no depthwise initialization strategy is used.
zero_bias_init (bool): Whether to initialize bias to zero or not.
"""
assert depthwise_init is None or depthwise_init in ['current', 'global']
assert depthwise_init is None or weight_init is not None, \
"If 'depthwise_init' is defined, a 'weight_init' method should be provided."
assert not zero_bias_init or weight_init is not None, \
"If 'zero_bias_init', a 'weight_init' method should be provided"
if weight_init is None:
return
for emb_layer in self.emb:
init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
for layer_idx, tr_layer in enumerate(self.transformer.layers):
depth = None
if depthwise_init == 'current':
depth = layer_idx + 1
elif depthwise_init == 'global':
depth = len(self.transformer.layers)
init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init)
tr_layer.apply(init_fn)
for linear in self.linears:
init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
@property
def special_token_id(self) -> int:
return self.card
@property
def num_codebooks(self) -> int:
return self.n_q
def forward(self, sequence: torch.Tensor,
conditions: tp.List[ConditioningAttributes],
condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor:
"""Apply language model on sequence and conditions.
Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and
S the sequence steps, return the logits with shape [B, card, K, S].
Args:
indices (torch.Tensor): Indices of the codes to model.
conditions (list of ConditioningAttributes): Conditions to use when modeling
the given codes. Note that when evaluating multiple time with the same conditioning
you should pre-compute those and pass them as `condition_tensors`.
condition_tensors (dict[str, ConditionType], optional): Pre-computed conditioning
tensors, see `conditions`.
Returns:
torch.Tensor: Logits.
"""
B, K, S = sequence.shape
assert K == self.num_codebooks, "Sequence shape must match the specified number of codebooks"
input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
if condition_tensors is None:
assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
# apply dropout modules
conditions = self.cfg_dropout(conditions)
conditions = self.att_dropout(conditions)
tokenized = self.condition_provider.tokenize(conditions)
# encode conditions and fuse, both have a streaming cache to not recompute when generating.
condition_tensors = self.condition_provider(tokenized)
else:
assert not conditions, "Shouldn't pass both conditions and condition_tensors."
input_, cross_attention_input = self.fuser(input_, condition_tensors)
out = self.transformer(input_, cross_attention_src=cross_attention_input)
if self.out_norm:
out = self.out_norm(out)
logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card]
# remove the prefix from the model outputs
if len(self.fuser.fuse2cond['prepend']) > 0:
logits = logits[:, :, -S:]
return logits # [B, K, S, card]
def compute_predictions(
self, codes: torch.Tensor,
conditions: tp.List[ConditioningAttributes],
condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput:
"""Given an input tensor of codes [B, K, T] and list of conditions, runs the model
forward using the specified codes interleaving pattern.
Args:
codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size,
K the number of codebooks and T the number of timesteps.
conditions (list of ConditioningAttributes): conditionings to use when modeling
the given codes. Note that when evaluating multiple time with the same conditioning
you should pre-compute those and pass them as `condition_tensors`.
condition_tensors (dict[str, ConditionType], optional): pre-computed conditioning
tensors, see `conditions`.
Returns:
LMOutput: Language model outputs
logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes,
i.e. the first item corresponds to logits to predict the first code, meaning that
no additional shifting of codes and logits is required.
mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions.
Given the specified interleaving strategies, parts of the logits and codes should
not be considered as valid predictions because of invalid context.
"""
B, K, T = codes.shape
codes = codes.contiguous()
# map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens
pattern = self.pattern_provider.get_pattern(T)
sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence(
codes, self.special_token_id, keep_only_valid_steps=True
)
# apply model on pattern sequence
model = self if self._fsdp is None else self._fsdp
logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card]
# map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card]
# and provide the corresponding mask over invalid positions of tokens
logits = logits.permute(0, 3, 1, 2) # [B, card, K, S]
# note: we use nans as special token to make it obvious if we feed unexpected logits
logits, logits_indexes, logits_mask = pattern.revert_pattern_logits(
logits, float('nan'), keep_only_valid_steps=True
)
logits = logits.permute(0, 2, 3, 1) # [B, K, T, card]
logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T]
return LMOutput(logits, logits_mask)
def _sample_next_token(self,
sequence: torch.Tensor,
cfg_conditions: CFGConditions,
unconditional_state: State,
use_sampling: bool = False,
temp: float = 1.0,
top_k: int = 0,
top_p: float = 0.0,
cfg_coef: tp.Optional[float] = None) -> torch.Tensor:
"""Sample next token from the model given a sequence and a set of conditions. The model supports
multiple sampling strategies (greedy sampling, softmax, top-k, top-p...).
Args:
sequence (torch.Tensor): Current sequence of shape [B, K, S]
with K corresponding to the number of codebooks and S the number of sequence steps.
S = 1 in streaming mode, except for the first step that contains a bigger prompt.
condition_tensors (dict[str, ConditionType): Set of conditions. If CFG is used,
should be twice the batch size, being the concatenation of the conditions + null conditions.
use_sampling (bool): Whether to use a sampling strategy or not.
temp (float): Sampling temperature.
top_k (int): K for "top-k" sampling.
top_p (float): P for "top-p" sampling.
cfg_coef (float, optional): classifier free guidance coefficient
Returns:
next_token (torch.Tensor): Next token tensor of shape [B, K, 1].
"""
B = sequence.shape[0]
cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
model = self if self._fsdp is None else self._fsdp
if self.two_step_cfg and cfg_conditions != {}:
assert isinstance(cfg_conditions, tuple), type(cfg_conditions)
condition_tensors, null_condition_tensors = cfg_conditions
cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors)
state = self.get_streaming_state()
self.set_streaming_state(unconditional_state)
uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors)
unconditional_state.update(self.get_streaming_state())
self.set_streaming_state(state)
logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef
else:
assert isinstance(cfg_conditions, dict)
condition_tensors = cfg_conditions
if condition_tensors:
# Preparing for CFG, predicting both conditional and unconditional logits.
sequence = torch.cat([sequence, sequence], dim=0)
all_logits = model(
sequence,
conditions=[], condition_tensors=condition_tensors)
if condition_tensors:
cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card]
logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
else:
logits = all_logits
logits = logits.permute(0, 1, 3, 2) # [B, K, card, T]
logits = logits[..., -1] # [B x K x card]
# Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error.
if use_sampling and temp > 0.0:
probs = torch.softmax(logits / temp, dim=-1)
if top_p > 0.0:
next_token = utils.sample_top_p(probs, p=top_p)
elif top_k > 0:
next_token = utils.sample_top_k(probs, k=top_k)
else:
next_token = utils.multinomial(probs, num_samples=1)
else:
next_token = torch.argmax(logits, dim=-1, keepdim=True)
return next_token
@torch.no_grad()
def generate(self,
prompt: tp.Optional[torch.Tensor] = None,
conditions: tp.List[ConditioningAttributes] = [],
num_samples: tp.Optional[int] = None,
max_gen_len: int = 256,
use_sampling: bool = True,
temp: float = 1.0,
top_k: int = 250,
top_p: float = 0.0,
cfg_coef: tp.Optional[float] = None,
two_step_cfg: tp.Optional[bool] = None,
remove_prompts: bool = False,
check: bool = False,
callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor:
"""Generate tokens sampling from the model given a prompt or unconditionally. Generation can
be perform in a greedy fashion or using sampling with top K and top P strategies.
Args:
prompt (torch.Tensor, optional): Prompt tokens of shape [B, K, T].
conditions_tensors (list of ConditioningAttributes, optional): List of conditions.
num_samples (int, optional): Number of samples to generate when no prompt and no conditions are given.
max_gen_len (int): Maximum generation length.
use_sampling (bool): Whether to use a sampling strategy or not.
temp (float): Sampling temperature.
top_k (int): K for "top-k" sampling.
top_p (float): P for "top-p" sampling.
cfg_coeff (float, optional): Classifier-free guidance coefficient.
two_step_cfg (bool, optional): Whether to perform classifier-free guidance with two steps generation.
remove_prompts (bool): Whether to remove prompts from generation or not.
check (bool): Whether to apply further checks on generated sequence.
callback (Callback, optional): Callback function to report generation progress.
Returns:
torch.Tensor: Generated tokens.
"""
assert not self.training, "generation shouldn't be used in training mode."
first_param = next(iter(self.parameters()))
device = first_param.device
# Checking all input shapes are consistent.
possible_num_samples = []
if num_samples is not None:
possible_num_samples.append(num_samples)
elif prompt is not None:
possible_num_samples.append(prompt.shape[0])
elif conditions:
possible_num_samples.append(len(conditions))
else:
possible_num_samples.append(1)
assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsistent inputs shapes"
num_samples = possible_num_samples[0]
# below we create set of conditions: one conditional and one unconditional
# to do that we merge the regular condition together with the null condition
# we then do 1 forward pass instead of 2.
# the reason for that is two-fold:
# 1. it is about x2 faster than doing 2 forward passes
# 2. avoid the streaming API treating the 2 passes as part of different time steps
# We also support doing two different passes, in particular to ensure that
# the padding structure is exactly the same between train and test.
# With a batch size of 1, this can be slower though.
cfg_conditions: CFGConditions
two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
if conditions:
null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions)
if two_step_cfg:
cfg_conditions = (
self.condition_provider(self.condition_provider.tokenize(conditions)),
self.condition_provider(self.condition_provider.tokenize(null_conditions)),
)
else:
conditions = conditions + null_conditions
tokenized = self.condition_provider.tokenize(conditions)
cfg_conditions = self.condition_provider(tokenized)
else:
cfg_conditions = {}
if prompt is None:
assert num_samples > 0
prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device)
B, K, T = prompt.shape
start_offset = T
assert start_offset < max_gen_len
pattern = self.pattern_provider.get_pattern(max_gen_len)
# this token is used as default value for codes that are not generated yet
unknown_token = -1
# we generate codes up to the max_gen_len that will be mapped to the pattern sequence
gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device)
# filling the gen_codes with the prompt if needed
gen_codes[..., :start_offset] = prompt
# create the gen_sequence with proper interleaving from the pattern: [B, K, S]
gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id)
# retrieve the start_offset in the sequence:
# it is the first sequence step that contains the `start_offset` timestep
start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset)
assert start_offset_sequence is not None
with self.streaming():
unconditional_state = self.get_streaming_state()
prev_offset = 0
gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S]
for offset in range(start_offset_sequence, gen_sequence_len):
# get current sequence (note that the streaming API is providing the caching over previous offsets)
curr_sequence = gen_sequence[..., prev_offset:offset]
curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1)
if check:
# check coherence between mask and sequence
assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all()
# should never happen as gen_sequence is filled progressively
assert not (curr_sequence == unknown_token).any()
# sample next token from the model, next token shape is [B, K, 1]
next_token = self._sample_next_token(
curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p,
cfg_coef=cfg_coef)
# ensure the tokens that should be masked are properly set to special_token_id
# as the model never output special_token_id
valid_mask = mask[..., offset:offset+1].expand(B, -1, -1)
next_token[~valid_mask] = self.special_token_id
# ensure we don't overwrite prompt tokens, we only write over unknown tokens
# (then mask tokens should be left as is as well, which is correct)
gen_sequence[..., offset:offset+1] = torch.where(
gen_sequence[..., offset:offset+1] == unknown_token,
next_token, gen_sequence[..., offset:offset+1]
)
prev_offset = offset
if callback is not None:
callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence)
unconditional_state.clear()
# ensure sequence has been entirely filled
assert not (gen_sequence == unknown_token).any()
# ensure gen_sequence pattern and mask are matching
# which means the gen_sequence is valid according to the pattern
assert (
gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id)
).all()
# get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps
out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token)
# sanity checks over the returned codes and corresponding masks
assert (out_codes[..., :max_gen_len] != unknown_token).all()
assert (out_mask[..., :max_gen_len] == 1).all()
out_start_offset = start_offset if remove_prompts else 0
out_codes = out_codes[..., out_start_offset:max_gen_len]
# ensure the returned codes are all valid
assert (out_codes >= 0).all() and (out_codes <= self.card).all()
return out_codes
| audiocraft-main | audiocraft/models/lm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Compression models or wrapper around existing models.
Also defines the main interface that a model must follow to be usable as an audio tokenizer.
"""
from abc import ABC, abstractmethod
import logging
import math
from pathlib import Path
import typing as tp
import numpy as np
import torch
from torch import nn
from transformers import EncodecModel as HFEncodecModel
from .. import quantization as qt
logger = logging.getLogger()
class CompressionModel(ABC, nn.Module):
"""Base API for all compression model that aim at being used as audio tokenizers
with a language model.
"""
@abstractmethod
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
...
@abstractmethod
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
"""See `EncodecModel.encode`."""
...
@abstractmethod
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
"""See `EncodecModel.decode`."""
...
@abstractmethod
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
...
@property
@abstractmethod
def channels(self) -> int:
...
@property
@abstractmethod
def frame_rate(self) -> float:
...
@property
@abstractmethod
def sample_rate(self) -> int:
...
@property
@abstractmethod
def cardinality(self) -> int:
...
@property
@abstractmethod
def num_codebooks(self) -> int:
...
@property
@abstractmethod
def total_codebooks(self) -> int:
...
@abstractmethod
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer."""
...
@staticmethod
def get_pretrained(
name: str, device: tp.Union[torch.device, str] = 'cpu'
) -> 'CompressionModel':
"""Instantiate a CompressionModel from a given pretrained model.
Args:
name (Path or str): name of the pretrained model. See after.
device (torch.device or str): Device on which the model is loaded.
Pretrained models:
- dac_44khz (https://github.com/descriptinc/descript-audio-codec)
- dac_24khz (same)
- facebook/encodec_24khz (https://huggingface.co/facebook/encodec_24khz)
- facebook/encodec_32khz (https://huggingface.co/facebook/encodec_32khz)
- your own model on HugginFace. Export instructions to come...
"""
from . import builders, loaders
model: CompressionModel
if name in ['dac_44khz', 'dac_24khz']:
model_type = name.split('_')[1]
logger.info("Getting pretrained compression model from DAC %s", model_type)
model = DAC(model_type)
elif name in ['debug_compression_model']:
logger.info("Getting pretrained compression model for debug")
model = builders.get_debug_compression_model()
elif Path(name).exists():
# We assume here if the paths exist that it is in fact an AC checkpoint
# that was exported using `audiocraft.utils.export` functions.
model = loaders.load_compression_model(name, device=device)
else:
logger.info("Getting pretrained compression model from HF %s", name)
hf_model = HFEncodecModel.from_pretrained(name)
model = HFEncodecCompressionModel(hf_model).to(device)
return model.to(device).eval()
class EncodecModel(CompressionModel):
"""Encodec model operating on the raw waveform.
Args:
encoder (nn.Module): Encoder network.
decoder (nn.Module): Decoder network.
quantizer (qt.BaseQuantizer): Quantizer network.
frame_rate (int): Frame rate for the latent representation.
sample_rate (int): Audio sample rate.
channels (int): Number of audio channels.
causal (bool): Whether to use a causal version of the model.
renormalize (bool): Whether to renormalize the audio before running the model.
"""
# we need assignment to override the property in the abstract class,
# I couldn't find a better way...
frame_rate: float = 0
sample_rate: int = 0
channels: int = 0
def __init__(self,
encoder: nn.Module,
decoder: nn.Module,
quantizer: qt.BaseQuantizer,
frame_rate: int,
sample_rate: int,
channels: int,
causal: bool = False,
renormalize: bool = False):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.quantizer = quantizer
self.frame_rate = frame_rate
self.sample_rate = sample_rate
self.channels = channels
self.renormalize = renormalize
self.causal = causal
if self.causal:
# we force disabling here to avoid handling linear overlap of segments
# as supported in original EnCodec codebase.
assert not self.renormalize, 'Causal model does not support renormalize'
@property
def total_codebooks(self):
"""Total number of quantizer codebooks available."""
return self.quantizer.total_codebooks
@property
def num_codebooks(self):
"""Active number of codebooks used by the quantizer."""
return self.quantizer.num_codebooks
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer."""
self.quantizer.set_num_codebooks(n)
@property
def cardinality(self):
"""Cardinality of each codebook."""
return self.quantizer.bins
def preprocess(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
scale: tp.Optional[torch.Tensor]
if self.renormalize:
mono = x.mean(dim=1, keepdim=True)
volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt()
scale = 1e-8 + volume
x = x / scale
scale = scale.view(-1, 1)
else:
scale = None
return x, scale
def postprocess(self,
x: torch.Tensor,
scale: tp.Optional[torch.Tensor] = None) -> torch.Tensor:
if scale is not None:
assert self.renormalize
x = x * scale.view(-1, 1, 1)
return x
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
assert x.dim() == 3
length = x.shape[-1]
x, scale = self.preprocess(x)
emb = self.encoder(x)
q_res = self.quantizer(emb, self.frame_rate)
out = self.decoder(q_res.x)
# remove extra padding added by the encoder and decoder
assert out.shape[-1] >= length, (out.shape[-1], length)
out = out[..., :length]
q_res.x = self.postprocess(out, scale)
return q_res
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
"""Encode the given input tensor to quantized representation along with scale parameter.
Args:
x (torch.Tensor): Float tensor of shape [B, C, T]
Returns:
codes, scale (tuple of torch.Tensor, torch.Tensor): Tuple composed of:
codes a float tensor of shape [B, K, T] with K the number of codebooks used and T the timestep.
scale a float tensor containing the scale for audio renormalizealization.
"""
assert x.dim() == 3
x, scale = self.preprocess(x)
emb = self.encoder(x)
codes = self.quantizer.encode(emb)
return codes, scale
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
"""Decode the given codes to a reconstructed representation, using the scale to perform
audio denormalization if needed.
Args:
codes (torch.Tensor): Int tensor of shape [B, K, T]
scale (torch.Tensor, optional): Float tensor containing the scale value.
Returns:
out (torch.Tensor): Float tensor of shape [B, C, T], the reconstructed audio.
"""
emb = self.decode_latent(codes)
out = self.decoder(emb)
out = self.postprocess(out, scale)
# out contains extra padding added by the encoder and decoder
return out
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
return self.quantizer.decode(codes)
class DAC(CompressionModel):
def __init__(self, model_type: str = "44khz"):
super().__init__()
try:
import dac.utils
except ImportError:
raise RuntimeError("Could not import dac, make sure it is installed, "
"please run `pip install descript-audio-codec`")
self.model = dac.utils.load_model(model_type=model_type)
self.n_quantizers = self.total_codebooks
self.model.eval()
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
# We don't support training with this.
raise NotImplementedError("Forward and training with DAC not supported.")
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
codes = self.model.encode(x, self.n_quantizers)[1]
return codes, None
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
assert scale is None
z_q = self.decode_latent(codes)
return self.model.decode(z_q)
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
return self.model.quantizer.from_codes(codes)[0]
@property
def channels(self) -> int:
return 1
@property
def frame_rate(self) -> float:
return self.model.sample_rate / self.model.hop_length
@property
def sample_rate(self) -> int:
return self.model.sample_rate
@property
def cardinality(self) -> int:
return self.model.codebook_size
@property
def num_codebooks(self) -> int:
return self.n_quantizers
@property
def total_codebooks(self) -> int:
return self.model.n_codebooks
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer.
"""
assert n >= 1
assert n <= self.total_codebooks
self.n_quantizers = n
class HFEncodecCompressionModel(CompressionModel):
"""Wrapper around HuggingFace Encodec.
"""
def __init__(self, model: HFEncodecModel):
super().__init__()
self.model = model
bws = self.model.config.target_bandwidths
num_codebooks = [
bw * 1000 / (self.frame_rate * math.log2(self.cardinality))
for bw in bws
]
deltas = [nc - int(nc) for nc in num_codebooks]
# Checking we didn't do some bad maths and we indeed have integers!
assert all(deltas) <= 1e-3, deltas
self.possible_num_codebooks = [int(nc) for nc in num_codebooks]
self.set_num_codebooks(max(self.possible_num_codebooks))
def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
# We don't support training with this.
raise NotImplementedError("Forward and training with HF EncodecModel not supported.")
def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
bandwidth_index = self.possible_num_codebooks.index(self.num_codebooks)
bandwidth = self.model.config.target_bandwidths[bandwidth_index]
res = self.model.encode(x, None, bandwidth)
assert len(res[0]) == 1
assert len(res[1]) == 1
return res[0][0], res[1][0]
def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
if scale is None:
scales = [None] # type: ignore
else:
scales = scale # type: ignore
res = self.model.decode(codes[None], scales)
return res[0]
def decode_latent(self, codes: torch.Tensor):
"""Decode from the discrete codes to continuous latent space."""
return self.model.quantizer.decode(codes.transpose(0, 1))
@property
def channels(self) -> int:
return self.model.config.audio_channels
@property
def frame_rate(self) -> float:
hop_length = int(np.prod(self.model.config.upsampling_ratios))
return self.sample_rate / hop_length
@property
def sample_rate(self) -> int:
return self.model.config.sampling_rate
@property
def cardinality(self) -> int:
return self.model.config.codebook_size
@property
def num_codebooks(self) -> int:
return self._num_codebooks
@property
def total_codebooks(self) -> int:
return max(self.possible_num_codebooks)
def set_num_codebooks(self, n: int):
"""Set the active number of codebooks used by the quantizer.
"""
if n not in self.possible_num_codebooks:
raise ValueError(f"Allowed values for num codebooks: {self.possible_num_codebooks}")
self._num_codebooks = n
| audiocraft-main | audiocraft/models/encodec.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import typing as tp
import torch
from .base import BaseQuantizer, QuantizedResult
from .core_vq import ResidualVectorQuantization
class ResidualVectorQuantizer(BaseQuantizer):
"""Residual Vector Quantizer.
Args:
dimension (int): Dimension of the codebooks.
n_q (int): Number of residual vector quantizers used.
q_dropout (bool): Random quantizer drop out at train time.
bins (int): Codebook size.
decay (float): Decay for exponential moving average over the codebooks.
kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
kmeans_iters (int): Number of iterations used for kmeans initialization.
threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
that have an exponential moving average cluster size less than the specified threshold with
randomly selected vector from the current batch.
orthogonal_reg_weight (float): Orthogonal regularization weights.
orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
orthogonal_reg_max_codes (optional int): Maximum number of codes to consider.
for orthogonal regularization.
"""
def __init__(
self,
dimension: int = 256,
n_q: int = 8,
q_dropout: bool = False,
bins: int = 1024,
decay: float = 0.99,
kmeans_init: bool = True,
kmeans_iters: int = 10,
threshold_ema_dead_code: int = 2,
orthogonal_reg_weight: float = 0.0,
orthogonal_reg_active_codes_only: bool = False,
orthogonal_reg_max_codes: tp.Optional[int] = None,
):
super().__init__()
self.max_n_q = n_q
self.n_q = n_q
self.q_dropout = q_dropout
self.dimension = dimension
self.bins = bins
self.decay = decay
self.kmeans_init = kmeans_init
self.kmeans_iters = kmeans_iters
self.threshold_ema_dead_code = threshold_ema_dead_code
self.orthogonal_reg_weight = orthogonal_reg_weight
self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
self.vq = ResidualVectorQuantization(
dim=self.dimension,
codebook_size=self.bins,
num_quantizers=self.n_q,
decay=self.decay,
kmeans_init=self.kmeans_init,
kmeans_iters=self.kmeans_iters,
threshold_ema_dead_code=self.threshold_ema_dead_code,
orthogonal_reg_weight=self.orthogonal_reg_weight,
orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only,
orthogonal_reg_max_codes=self.orthogonal_reg_max_codes,
channels_last=False
)
def forward(self, x: torch.Tensor, frame_rate: int):
n_q = self.n_q
if self.training and self.q_dropout:
n_q = int(torch.randint(1, self.n_q + 1, (1,)).item())
bw_per_q = math.log2(self.bins) * frame_rate / 1000
quantized, codes, commit_loss = self.vq(x, n_q=n_q)
codes = codes.transpose(0, 1)
# codes is [B, K, T], with T frames, K nb of codebooks.
bw = torch.tensor(n_q * bw_per_q).to(x)
return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss))
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""Encode a given input tensor with the specified frame rate at the given bandwidth.
The RVQ encode method sets the appropriate number of quantizer to use
and returns indices for each quantizer.
"""
n_q = self.n_q
codes = self.vq.encode(x, n_q=n_q)
codes = codes.transpose(0, 1)
# codes is [B, K, T], with T frames, K nb of codebooks.
return codes
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation."""
# codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T].
codes = codes.transpose(0, 1)
quantized = self.vq.decode(codes)
return quantized
@property
def total_codebooks(self):
return self.max_n_q
@property
def num_codebooks(self):
return self.n_q
def set_num_codebooks(self, n: int):
assert n > 0 and n <= self.max_n_q
self.n_q = n
| audiocraft-main | audiocraft/quantization/vq.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""RVQ."""
# flake8: noqa
from .vq import ResidualVectorQuantizer
from .base import BaseQuantizer, DummyQuantizer, QuantizedResult
| audiocraft-main | audiocraft/quantization/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from einops import rearrange, repeat
import flashy
import torch
from torch import nn, einsum
import torch.nn.functional as F
def exists(val: tp.Optional[tp.Any]) -> bool:
return val is not None
def default(val: tp.Any, d: tp.Any) -> tp.Any:
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, p=2, dim=-1)
def ema_inplace(moving_avg, new, decay: float):
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
return (x + epsilon) / (x.sum() + n_categories * epsilon)
def uniform_init(*shape: int):
t = torch.empty(shape)
nn.init.kaiming_uniform_(t)
return t
def sample_vectors(samples, num: int):
num_samples, device = samples.shape[0], samples.device
if num_samples >= num:
indices = torch.randperm(num_samples, device=device)[:num]
else:
indices = torch.randint(0, num_samples, (num,), device=device)
return samples[indices]
def kmeans(samples, num_clusters: int, num_iters: int = 10):
dim, dtype = samples.shape[-1], samples.dtype
means = sample_vectors(samples, num_clusters)
for _ in range(num_iters):
diffs = rearrange(samples, "n d -> n () d") - rearrange(
means, "c d -> () c d"
)
dists = -(diffs ** 2).sum(dim=-1)
buckets = dists.max(dim=-1).indices
bins = torch.bincount(buckets, minlength=num_clusters)
zero_mask = bins == 0
bins_min_clamped = bins.masked_fill(zero_mask, 1)
new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples)
new_means = new_means / bins_min_clamped[..., None]
means = torch.where(zero_mask[..., None], means, new_means)
return means, bins
def orthogonal_loss_fn(t):
# eq (2) from https://arxiv.org/abs/2112.00384
n = t.shape[0]
normed_codes = l2norm(t)
identity = torch.eye(n, device=t.device)
cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes)
return ((cosine_sim - identity) ** 2).sum() / (n ** 2)
class EuclideanCodebook(nn.Module):
"""Codebook with Euclidean distance.
Args:
dim (int): Dimension.
codebook_size (int): Codebook size.
kmeans_init (bool): Whether to use k-means to initialize the codebooks.
If set to true, run the k-means algorithm on the first training batch and use
the learned centroids as initialization.
kmeans_iters (int): Number of iterations used for k-means algorithm at initialization.
decay (float): Decay for exponential moving average over the codebooks.
epsilon (float): Epsilon value for numerical stability.
threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
that have an exponential moving average cluster size less than the specified threshold with
randomly selected vector from the current batch.
"""
def __init__(
self,
dim: int,
codebook_size: int,
kmeans_init: int = False,
kmeans_iters: int = 10,
decay: float = 0.8,
epsilon: float = 1e-5,
threshold_ema_dead_code: int = 2,
):
super().__init__()
self.decay = decay
init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
embed = init_fn(codebook_size, dim)
self.codebook_size = codebook_size
self.kmeans_iters = kmeans_iters
self.epsilon = epsilon
self.threshold_ema_dead_code = threshold_ema_dead_code
self.register_buffer("inited", torch.Tensor([not kmeans_init]))
self.register_buffer("cluster_size", torch.zeros(codebook_size))
self.register_buffer("embed", embed)
self.register_buffer("embed_avg", embed.clone())
@torch.jit.ignore
def init_embed_(self, data):
if self.inited:
return
embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
self.embed.data.copy_(embed)
self.embed_avg.data.copy_(embed.clone())
self.cluster_size.data.copy_(cluster_size)
self.inited.data.copy_(torch.Tensor([True]))
# Make sure all buffers across workers are in sync after initialization
flashy.distrib.broadcast_tensors(self.buffers())
def replace_(self, samples, mask):
modified_codebook = torch.where(
mask[..., None], sample_vectors(samples, self.codebook_size), self.embed
)
self.embed.data.copy_(modified_codebook)
def expire_codes_(self, batch_samples):
if self.threshold_ema_dead_code == 0:
return
expired_codes = self.cluster_size < self.threshold_ema_dead_code
if not torch.any(expired_codes):
return
batch_samples = rearrange(batch_samples, "... d -> (...) d")
self.replace_(batch_samples, mask=expired_codes)
flashy.distrib.broadcast_tensors(self.buffers())
def preprocess(self, x):
x = rearrange(x, "... d -> (...) d")
return x
def quantize(self, x):
embed = self.embed.t()
dist = -(
x.pow(2).sum(1, keepdim=True)
- 2 * x @ embed
+ embed.pow(2).sum(0, keepdim=True)
)
embed_ind = dist.max(dim=-1).indices
return embed_ind
def postprocess_emb(self, embed_ind, shape):
return embed_ind.view(*shape[:-1])
def dequantize(self, embed_ind):
quantize = F.embedding(embed_ind, self.embed)
return quantize
def encode(self, x):
shape = x.shape
# pre-process
x = self.preprocess(x)
# quantize
embed_ind = self.quantize(x)
# post-process
embed_ind = self.postprocess_emb(embed_ind, shape)
return embed_ind
def decode(self, embed_ind):
quantize = self.dequantize(embed_ind)
return quantize
def forward(self, x):
shape, dtype = x.shape, x.dtype
x = self.preprocess(x)
self.init_embed_(x)
embed_ind = self.quantize(x)
embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
embed_ind = self.postprocess_emb(embed_ind, shape)
quantize = self.dequantize(embed_ind)
if self.training:
# We do the expiry of code at that point as buffers are in sync
# and all the workers will take the same decision.
self.expire_codes_(x)
ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay)
embed_sum = x.t() @ embed_onehot
ema_inplace(self.embed_avg, embed_sum.t(), self.decay)
cluster_size = (
laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon)
* self.cluster_size.sum()
)
embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
self.embed.data.copy_(embed_normalized)
return quantize, embed_ind
class VectorQuantization(nn.Module):
"""Vector quantization implementation.
Currently supports only euclidean distance.
Args:
dim (int): Dimension
codebook_size (int): Codebook size
codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
decay (float): Decay for exponential moving average over the codebooks.
epsilon (float): Epsilon value for numerical stability.
kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
kmeans_iters (int): Number of iterations used for kmeans initialization.
threshold_ema_dead_code (int):
channels_last (bool): Channels are the last dimension in the input tensors.
commitment_weight (float): Weight for commitment loss.
orthogonal_reg_weight (float): Orthogonal regularization weights.
orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
orthogonal_reg_max_codes (optional int): Maximum number of codes to consider
for orthogonal regularization.
threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
that have an exponential moving average cluster size less than the specified threshold with
randomly selected vector from the current batch.
"""
def __init__(
self,
dim: int,
codebook_size: int,
codebook_dim: tp.Optional[int] = None,
decay: float = 0.8,
epsilon: float = 1e-5,
kmeans_init: bool = False,
kmeans_iters: int = 10,
threshold_ema_dead_code: int = 2,
channels_last: bool = False,
commitment_weight: float = 1.,
orthogonal_reg_weight: float = 0.0,
orthogonal_reg_active_codes_only: bool = False,
orthogonal_reg_max_codes: tp.Optional[int] = None,
):
super().__init__()
_codebook_dim: int = default(codebook_dim, dim)
requires_projection = _codebook_dim != dim
self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity())
self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity())
self.epsilon = epsilon
self.commitment_weight = commitment_weight
self.orthogonal_reg_weight = orthogonal_reg_weight
self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size,
kmeans_init=kmeans_init, kmeans_iters=kmeans_iters,
decay=decay, epsilon=epsilon,
threshold_ema_dead_code=threshold_ema_dead_code)
self.codebook_size = codebook_size
self.channels_last = channels_last
@property
def codebook(self):
return self._codebook.embed
@property
def inited(self):
return self._codebook.inited
def _preprocess(self, x):
if not self.channels_last:
x = rearrange(x, "b d n -> b n d")
return x
def _postprocess(self, quantize):
if not self.channels_last:
quantize = rearrange(quantize, "b n d -> b d n")
return quantize
def encode(self, x):
x = self._preprocess(x)
x = self.project_in(x)
embed_in = self._codebook.encode(x)
return embed_in
def decode(self, embed_ind):
quantize = self._codebook.decode(embed_ind)
quantize = self.project_out(quantize)
quantize = self._postprocess(quantize)
return quantize
def forward(self, x):
device = x.device
x = self._preprocess(x)
x = self.project_in(x)
quantize, embed_ind = self._codebook(x)
if self.training:
quantize = x + (quantize - x).detach()
loss = torch.tensor([0.0], device=device, requires_grad=self.training)
if self.training:
if self.commitment_weight > 0:
commit_loss = F.mse_loss(quantize.detach(), x)
loss = loss + commit_loss * self.commitment_weight
if self.orthogonal_reg_weight > 0:
codebook = self.codebook
if self.orthogonal_reg_active_codes_only:
# only calculate orthogonal loss for the activated codes for this batch
unique_code_ids = torch.unique(embed_ind)
codebook = codebook[unique_code_ids]
num_codes = codebook.shape[0]
if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes]
codebook = codebook[rand_ids]
orthogonal_reg_loss = orthogonal_loss_fn(codebook)
loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
quantize = self.project_out(quantize)
quantize = self._postprocess(quantize)
return quantize, embed_ind, loss
class ResidualVectorQuantization(nn.Module):
"""Residual vector quantization implementation.
Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
"""
def __init__(self, *, num_quantizers, **kwargs):
super().__init__()
self.layers = nn.ModuleList(
[VectorQuantization(**kwargs) for _ in range(num_quantizers)]
)
def forward(self, x, n_q: tp.Optional[int] = None):
quantized_out = 0.0
residual = x
all_losses = []
all_indices = []
n_q = n_q or len(self.layers)
for i, layer in enumerate(self.layers[:n_q]):
quantized, indices, loss = layer(residual)
residual = residual - quantized
quantized_out = quantized_out + quantized
all_indices.append(indices)
all_losses.append(loss)
out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
return quantized_out, out_indices, out_losses
def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor:
residual = x
all_indices = []
n_q = n_q or len(self.layers)
for layer in self.layers[:n_q]:
indices = layer.encode(residual)
quantized = layer.decode(indices)
residual = residual - quantized
all_indices.append(indices)
out_indices = torch.stack(all_indices)
return out_indices
def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
quantized_out = torch.tensor(0.0, device=q_indices.device)
for i, indices in enumerate(q_indices):
layer = self.layers[i]
quantized = layer.decode(indices)
quantized_out = quantized_out + quantized
return quantized_out
| audiocraft-main | audiocraft/quantization/core_vq.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Base class for all quantizers.
"""
from dataclasses import dataclass, field
import typing as tp
import torch
from torch import nn
@dataclass
class QuantizedResult:
x: torch.Tensor
codes: torch.Tensor
bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item.
penalty: tp.Optional[torch.Tensor] = None
metrics: dict = field(default_factory=dict)
class BaseQuantizer(nn.Module):
"""Base class for quantizers.
"""
def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult:
"""
Given input tensor x, returns first the quantized (or approximately quantized)
representation along with quantized codes, bandwidth, and any penalty term for the loss.
Finally, this returns a dict of metrics to update logging etc.
Frame rate must be passed so that the bandwidth is properly computed.
"""
raise NotImplementedError()
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""Encode a given input tensor with the specified sample rate at the given bandwidth."""
raise NotImplementedError()
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation."""
raise NotImplementedError()
@property
def total_codebooks(self):
"""Total number of codebooks."""
raise NotImplementedError()
@property
def num_codebooks(self):
"""Number of active codebooks."""
raise NotImplementedError()
def set_num_codebooks(self, n: int):
"""Set the number of active codebooks."""
raise NotImplementedError()
class DummyQuantizer(BaseQuantizer):
"""Fake quantizer that actually does not perform any quantization.
"""
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor, frame_rate: int):
q = x.unsqueeze(1)
return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x))
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""Encode a given input tensor with the specified sample rate at the given bandwidth.
In the case of the DummyQuantizer, the codes are actually identical
to the input and resulting quantized representation as no quantization is done.
"""
return x.unsqueeze(1)
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation.
In the case of the DummyQuantizer, the codes are actually identical
to the input and resulting quantized representation as no quantization is done.
"""
return codes.squeeze(1)
@property
def total_codebooks(self):
"""Total number of codebooks."""
return 1
@property
def num_codebooks(self):
"""Total number of codebooks."""
return self.total_codebooks
def set_num_codebooks(self, n: int):
"""Set the number of active codebooks."""
raise AttributeError("Cannot override the number of codebooks for the dummy quantizer")
| audiocraft-main | audiocraft/quantization/base.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import numpy as np
import torch.nn as nn
from .conv import StreamableConv1d, StreamableConvTranspose1d
from .lstm import StreamableLSTM
class SEANetResnetBlock(nn.Module):
"""Residual block from SEANet model.
Args:
dim (int): Dimension of the input/output.
kernel_sizes (list): List of kernel sizes for the convolutions.
dilations (list): List of dilations for the convolutions.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
norm (str): Normalization method.
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
causal (bool): Whether to use fully causal convolution.
pad_mode (str): Padding mode for the convolutions.
compress (int): Reduced dimensionality in residual branches (from Demucs v3).
true_skip (bool): Whether to use true skip connection or a simple
(streamable) convolution as the skip connection.
"""
def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1],
activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False,
pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True):
super().__init__()
assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations'
act = getattr(nn, activation)
hidden = dim // compress
block = []
for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
in_chs = dim if i == 0 else hidden
out_chs = dim if i == len(kernel_sizes) - 1 else hidden
block += [
act(**activation_params),
StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation,
norm=norm, norm_kwargs=norm_params,
causal=causal, pad_mode=pad_mode),
]
self.block = nn.Sequential(*block)
self.shortcut: nn.Module
if true_skip:
self.shortcut = nn.Identity()
else:
self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params,
causal=causal, pad_mode=pad_mode)
def forward(self, x):
return self.shortcut(x) + self.block(x)
class SEANetEncoder(nn.Module):
"""SEANet encoder.
Args:
channels (int): Audio channels.
dimension (int): Intermediate representation dimension.
n_filters (int): Base width for the model.
n_residual_layers (int): nb of residual layers.
ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of
upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here
that must match the decoder order. We use the decoder order as some models may only employ the decoder.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
norm (str): Normalization method.
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
kernel_size (int): Kernel size for the initial convolution.
last_kernel_size (int): Kernel size for the initial convolution.
residual_kernel_size (int): Kernel size for the residual layers.
dilation_base (int): How much to increase the dilation with each layer.
causal (bool): Whether to use fully causal convolution.
pad_mode (str): Padding mode for the convolutions.
true_skip (bool): Whether to use true skip connection or a simple
(streamable) convolution as the skip connection in the residual network blocks.
compress (int): Reduced dimensionality in residual branches (from Demucs v3).
lstm (int): Number of LSTM layers at the end of the encoder.
disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
For the encoder, it corresponds to the N first blocks.
"""
def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
disable_norm_outer_blocks: int = 0):
super().__init__()
self.channels = channels
self.dimension = dimension
self.n_filters = n_filters
self.ratios = list(reversed(ratios))
del ratios
self.n_residual_layers = n_residual_layers
self.hop_length = np.prod(self.ratios)
self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
self.disable_norm_outer_blocks = disable_norm_outer_blocks
assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
"Number of blocks for which to disable norm is invalid." \
"It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
act = getattr(nn, activation)
mult = 1
model: tp.List[nn.Module] = [
StreamableConv1d(channels, mult * n_filters, kernel_size,
norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
# Downsample to raw audio scale
for i, ratio in enumerate(self.ratios):
block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm
# Add residual layers
for j in range(n_residual_layers):
model += [
SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1],
dilations=[dilation_base ** j, 1],
norm=block_norm, norm_params=norm_params,
activation=activation, activation_params=activation_params,
causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
# Add downsampling layers
model += [
act(**activation_params),
StreamableConv1d(mult * n_filters, mult * n_filters * 2,
kernel_size=ratio * 2, stride=ratio,
norm=block_norm, norm_kwargs=norm_params,
causal=causal, pad_mode=pad_mode),
]
mult *= 2
if lstm:
model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
model += [
act(**activation_params),
StreamableConv1d(mult * n_filters, dimension, last_kernel_size,
norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class SEANetDecoder(nn.Module):
"""SEANet decoder.
Args:
channels (int): Audio channels.
dimension (int): Intermediate representation dimension.
n_filters (int): Base width for the model.
n_residual_layers (int): nb of residual layers.
ratios (Sequence[int]): kernel size and stride ratios.
activation (str): Activation function.
activation_params (dict): Parameters to provide to the activation function.
final_activation (str): Final activation function after all convolutions.
final_activation_params (dict): Parameters to provide to the activation function.
norm (str): Normalization method.
norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
kernel_size (int): Kernel size for the initial convolution.
last_kernel_size (int): Kernel size for the initial convolution.
residual_kernel_size (int): Kernel size for the residual layers.
dilation_base (int): How much to increase the dilation with each layer.
causal (bool): Whether to use fully causal convolution.
pad_mode (str): Padding mode for the convolutions.
true_skip (bool): Whether to use true skip connection or a simple.
(streamable) convolution as the skip connection in the residual network blocks.
compress (int): Reduced dimensionality in residual branches (from Demucs v3).
lstm (int): Number of LSTM layers at the end of the encoder.
disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
For the decoder, it corresponds to the N last blocks.
trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup.
If equal to 1.0, it means that all the trimming is done at the right.
"""
def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None,
norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0):
super().__init__()
self.dimension = dimension
self.channels = channels
self.n_filters = n_filters
self.ratios = ratios
del ratios
self.n_residual_layers = n_residual_layers
self.hop_length = np.prod(self.ratios)
self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
self.disable_norm_outer_blocks = disable_norm_outer_blocks
assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
"Number of blocks for which to disable norm is invalid." \
"It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
act = getattr(nn, activation)
mult = int(2 ** len(self.ratios))
model: tp.List[nn.Module] = [
StreamableConv1d(dimension, mult * n_filters, kernel_size,
norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
if lstm:
model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
# Upsample to raw audio scale
for i, ratio in enumerate(self.ratios):
block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm
# Add upsampling layers
model += [
act(**activation_params),
StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2,
kernel_size=ratio * 2, stride=ratio,
norm=block_norm, norm_kwargs=norm_params,
causal=causal, trim_right_ratio=trim_right_ratio),
]
# Add residual layers
for j in range(n_residual_layers):
model += [
SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1],
dilations=[dilation_base ** j, 1],
activation=activation, activation_params=activation_params,
norm=block_norm, norm_params=norm_params, causal=causal,
pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
mult //= 2
# Add final layers
model += [
act(**activation_params),
StreamableConv1d(n_filters, channels, last_kernel_size,
norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
]
# Add optional final activation to decoder (eg. tanh)
if final_activation is not None:
final_act = getattr(nn, final_activation)
final_activation_params = final_activation_params or {}
model += [
final_act(**final_activation_params)
]
self.model = nn.Sequential(*model)
def forward(self, z):
y = self.model(z)
return y
| audiocraft-main | audiocraft/modules/seanet.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch import Tensor
from typing import Union, Callable
class CustomGLU(nn.Module):
"""Custom Gated Linear Unit activation.
Applies a modified gated linear unit :math:`a * f(b)` where :math:`a` is the first half
of the input matrices, :math:`b` is the second half, and :math:`f` is a provided activation
function (i.e. sigmoid, swish, etc.).
Args:
activation (nn.Module): The custom activation to apply in the Gated Linear Unit
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions
- Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
Examples::
>>> m = CustomGLU(nn.Sigmoid())
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
def __init__(self, activation: nn.Module, dim: int = -1):
super(CustomGLU, self).__init__()
self.dim = dim
self.activation = activation
def forward(self, x: Tensor):
assert x.shape[self.dim] % 2 == 0 # M = N / 2
a, b = torch.chunk(x, 2, dim=self.dim)
return a * self.activation(b)
class SwiGLU(CustomGLU):
"""SiLU Gated Linear Unit activation.
Applies SiLU Gated Linear Unit :math:`a * SiLU(b)` where :math:`a` is
the first half of the input matrices, :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
"""
def __init__(self, dim: int = -1):
super(SwiGLU, self).__init__(nn.SiLU(), dim)
class GeGLU(CustomGLU):
"""GeLU Gated Linear Unit activation.
Applies GeLU Gated Linear Unit :math:`a * GELU(b)` where :math:`a` is
the first half of the input matrices, :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
"""
def __init__(self, dim: int = -1):
super(GeGLU, self).__init__(nn.GELU(), dim)
class ReGLU(CustomGLU):
"""ReLU Gated Linear Unit activation.
Applies ReLU Gated Linear Unit :math:`a * ReLU(b)` where :math:`a` is
the first half of the input matrices, :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
"""
def __init__(self, dim: int = -1):
super(ReGLU, self).__init__(nn.ReLU(), dim)
def get_activation_fn(
activation: Union[str, Callable[[Tensor], Tensor]]
) -> Union[str, Callable[[Tensor], Tensor]]:
"""Helper function to map an activation string to the activation class.
If the supplied activation is not a string that is recognized, the activation is passed back.
Args:
activation (str, or Callable[[Tensor], Tensor]): Activation to check
"""
if isinstance(activation, str):
if activation == "reglu":
return ReGLU()
elif activation == "geglu":
return GeGLU()
elif activation == "swiglu":
return SwiGLU()
return activation
| audiocraft-main | audiocraft/modules/activations.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.