python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
src_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2.pt"
ref_ckpt = "/checkpoint/wnhsu/w2v/hubert_icassp_oss_v3/iter2_km100-400k-grp-L6/oss.km500_p0_1_s334.pmw1_0.puw0_0.grpnorm.ml10.mp0_8.untie.mxsz250000.ufreq1.maxtok1400000.MU100k.s1337.ngpu32/checkpoint_last.pt"
new_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2_updated.pt"
def update_state(state):
state["model"]["label_embs_concat"] = state["model"].pop("label_embs")
state["args"].task = "hubert_pretraining"
state["args"].labels = f"['{state['args'].labels}']"
return state
src_state = torch.load(src_ckpt)
src_state = update_state(src_state)
torch.save(src_state, new_ckpt)
| EXA-1-master | exa/libraries/fairseq/examples/hubert/update_ckpt.py |
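# Editor's note: a minimal sketch (not part of the original file) for checking that the
# key rename done by update_ckpt.py took effect; the checkpoint path is hypothetical.
import torch

ckpt = torch.load("/path/to/hubert_base_ls960_it2_updated.pt", map_location="cpu")
assert "label_embs_concat" in ckpt["model"], "expected the renamed embedding key"
assert "label_embs" not in ckpt["model"], "the old key should have been removed"
print(ckpt["args"].task)    # "hubert_pretraining" after the update
print(ckpt["args"].labels)  # a stringified list such as "['km']"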
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os.path as op
import re
from tabulate import tabulate
from collections import Counter
def comp_purity(p_xy, axis):
max_p = p_xy.max(axis=axis)
marg_p = p_xy.sum(axis=axis)
indv_pur = max_p / marg_p
aggr_pur = max_p.sum()
return indv_pur, aggr_pur
def comp_entropy(p):
return (-p * np.log(p + 1e-8)).sum()
def comp_norm_mutual_info(p_xy):
p_x = p_xy.sum(axis=1, keepdims=True)
p_y = p_xy.sum(axis=0, keepdims=True)
pmi = np.log(p_xy / np.matmul(p_x, p_y) + 1e-8)
mi = (p_xy * pmi).sum()
h_x = comp_entropy(p_x)
h_y = comp_entropy(p_y)
return mi, mi / h_x, mi / h_y, h_x, h_y
def pad(labs, n):
if n == 0:
return np.array(labs)
return np.concatenate([[labs[0]] * n, labs, [labs[-1]] * n])
def comp_avg_seg_dur(labs_list):
n_frms = 0
n_segs = 0
for labs in labs_list:
labs = np.array(labs)
edges = np.zeros(len(labs)).astype(bool)
edges[0] = True
edges[1:] = labs[1:] != labs[:-1]
n_frms += len(edges)
n_segs += edges.astype(int).sum()
return n_frms / n_segs
def comp_joint_prob(uid2refs, uid2hyps):
"""
Computes the joint distribution p(ref, hyp) over frame-level label pairs,
counting only the overlapping frames of utterances present in both inputs.
"""
cnts = Counter()
skipped = []
abs_frmdiff = 0
for uid in uid2refs:
if uid not in uid2hyps:
skipped.append(uid)
continue
refs = uid2refs[uid]
hyps = uid2hyps[uid]
abs_frmdiff += abs(len(refs) - len(hyps))
min_len = min(len(refs), len(hyps))
refs = refs[:min_len]
hyps = hyps[:min_len]
cnts.update(zip(refs, hyps))
tot = sum(cnts.values())
ref_set = sorted({ref for ref, _ in cnts.keys()})
hyp_set = sorted({hyp for _, hyp in cnts.keys()})
ref2pid = dict(zip(ref_set, range(len(ref_set))))
hyp2lid = dict(zip(hyp_set, range(len(hyp_set))))
# print(hyp_set)
p_xy = np.zeros((len(ref2pid), len(hyp2lid)), dtype=float)
for (ref, hyp), cnt in cnts.items():
p_xy[ref2pid[ref], hyp2lid[hyp]] = cnt
p_xy /= p_xy.sum()
return p_xy, ref2pid, hyp2lid, tot, abs_frmdiff, skipped
def read_phn(tsv_path, rm_stress=True):
uid2phns = {}
with open(tsv_path) as f:
for line in f:
uid, phns = line.rstrip().split("\t")
phns = phns.split(",")
if rm_stress:
phns = [re.sub("[0-9]", "", phn) for phn in phns]
uid2phns[uid] = phns
return uid2phns
def read_lab(tsv_path, lab_path, pad_len=0, upsample=1):
"""
The tsv file is only used to retrieve the utterance ids for the labels.
"""
with open(tsv_path) as f:
f.readline()
uids = [op.splitext(op.basename(line.rstrip().split()[0]))[0] for line in f]
with open(lab_path) as f:
labs_list = [pad(line.rstrip().split(), pad_len).repeat(upsample) for line in f]
assert len(uids) == len(labs_list)
return dict(zip(uids, labs_list))
def main_lab_lab(
tsv_dir,
lab_dir,
lab_name,
lab_sets,
ref_dir,
ref_name,
pad_len=0,
upsample=1,
verbose=False,
):
# assume tsv_dir is the same for both the reference and the hypotheses
tsv_dir = lab_dir if tsv_dir is None else tsv_dir
uid2refs = {}
for s in lab_sets:
uid2refs.update(read_lab(f"{tsv_dir}/{s}.tsv", f"{ref_dir}/{s}.{ref_name}"))
uid2hyps = {}
for s in lab_sets:
uid2hyps.update(
read_lab(
f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
)
)
_main(uid2refs, uid2hyps, verbose)
def main_phn_lab(
tsv_dir,
lab_dir,
lab_name,
lab_sets,
phn_dir,
phn_sets,
pad_len=0,
upsample=1,
verbose=False,
):
uid2refs = {}
for s in phn_sets:
uid2refs.update(read_phn(f"{phn_dir}/{s}.tsv"))
uid2hyps = {}
tsv_dir = lab_dir if tsv_dir is None else tsv_dir
for s in lab_sets:
uid2hyps.update(
read_lab(
f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
)
)
_main(uid2refs, uid2hyps, verbose)
def _main(uid2refs, uid2hyps, verbose):
(p_xy, ref2pid, hyp2lid, tot, frmdiff, skipped) = comp_joint_prob(
uid2refs, uid2hyps
)
ref_pur_by_hyp, ref_pur = comp_purity(p_xy, axis=0)
hyp_pur_by_ref, hyp_pur = comp_purity(p_xy, axis=1)
(mi, mi_norm_by_ref, mi_norm_by_hyp, h_ref, h_hyp) = comp_norm_mutual_info(p_xy)
outputs = {
"ref pur": ref_pur,
"hyp pur": hyp_pur,
"H(ref)": h_ref,
"H(hyp)": h_hyp,
"MI": mi,
"MI/H(ref)": mi_norm_by_ref,
"ref segL": comp_avg_seg_dur(uid2refs.values()),
"hyp segL": comp_avg_seg_dur(uid2hyps.values()),
"p_xy shape": p_xy.shape,
"frm tot": tot,
"frm diff": frmdiff,
"utt tot": len(uid2refs),
"utt miss": len(skipped),
}
print(tabulate([outputs.values()], outputs.keys(), floatfmt=".4f"))
if __name__ == "__main__":
"""
Compute the quality of hypothesis labels with respect to frame-level phone
transcripts, or with respect to another label set if --ref_lab_dir and --ref_lab_name are given.
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("lab_dir")
parser.add_argument("lab_name")
parser.add_argument("--lab_sets", default=["valid"], type=str, nargs="+")
parser.add_argument(
"--phn_dir",
default="/checkpoint/wnhsu/data/librispeech/960h/fa/raw_phn/phone_frame_align_v1",
)
parser.add_argument(
"--phn_sets", default=["dev-clean", "dev-other"], type=str, nargs="+"
)
parser.add_argument("--pad_len", default=0, type=int, help="padding for hypotheses")
parser.add_argument(
"--upsample", default=1, type=int, help="upsample factor for hypotheses"
)
parser.add_argument("--ref_lab_dir", default="")
parser.add_argument("--ref_lab_name", default="")
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
if args.ref_lab_dir and args.ref_lab_name:
main_lab_lab(
args.tsv_dir,
args.lab_dir,
args.lab_name,
args.lab_sets,
args.ref_lab_dir,
args.ref_lab_name,
args.pad_len,
args.upsample,
args.verbose,
)
else:
main_phn_lab(
args.tsv_dir,
args.lab_dir,
args.lab_name,
args.lab_sets,
args.phn_dir,
args.phn_sets,
args.pad_len,
args.upsample,
args.verbose,
)
| EXA-1-master | exa/libraries/fairseq/examples/hubert/measure_teacher_quality.py |
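# Editor's note: toy, self-contained check (not part of the original file) of the purity
# and normalized mutual information math used above, on a hand-made joint distribution
# over (reference phone, hypothesis cluster).
import numpy as np

p_xy = np.array([[0.4, 0.1],
                 [0.0, 0.5]])   # rows: refs, cols: hyps; entries sum to 1
p_x = p_xy.sum(axis=1, keepdims=True)
p_y = p_xy.sum(axis=0, keepdims=True)
mi = (p_xy * np.log(p_xy / np.matmul(p_x, p_y) + 1e-8)).sum()
h_x = (-p_x * np.log(p_x + 1e-8)).sum()
print("aggregate purity (axis=1):", p_xy.max(axis=1).sum())   # as in comp_purity(p_xy, axis=1)
print("MI / H(ref):", mi / h_x)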
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
import joblib
import torch
import tqdm
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_km_label")
class ApplyKmeans(object):
def __init__(self, km_path):
self.km_model = joblib.load(km_path)
self.C_np = self.km_model.cluster_centers_.transpose()
self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True)
self.C = torch.from_numpy(self.C_np)
self.Cnorm = torch.from_numpy(self.Cnorm_np)
if torch.cuda.is_available():
self.C = self.C.cuda()
self.Cnorm = self.Cnorm.cuda()
def __call__(self, x):
if isinstance(x, torch.Tensor):
dist = (
x.pow(2).sum(1, keepdim=True)
- 2 * torch.matmul(x, self.C)
+ self.Cnorm
)
return dist.argmin(dim=1).cpu().numpy()
else:
dist = (
(x ** 2).sum(1, keepdims=True)
- 2 * np.matmul(x, self.C_np)
+ self.Cnorm_np
)
return np.argmin(dist, axis=1)
def get_feat_iterator(feat_dir, split, nshard, rank):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
def iterate():
feat = np.load(feat_path, mmap_mode="r")
assert feat.shape[0] == (offsets[-1] + lengs[-1])
for offset, leng in zip(offsets, lengs):
yield feat[offset: offset + leng]
return iterate, len(lengs)
def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir):
apply_kmeans = ApplyKmeans(km_path)
generator, num = get_feat_iterator(feat_dir, split, nshard, rank)
iterator = generator()
lab_path = f"{lab_dir}/{split}_{rank}_{nshard}.km"
os.makedirs(lab_dir, exist_ok=True)
with open(lab_path, "w") as f:
for feat in tqdm.tqdm(iterator, total=num):
# feat = torch.from_numpy(feat).cuda()
lab = apply_kmeans(feat).tolist()
f.write(" ".join(map(str, lab)) + "\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir")
parser.add_argument("split")
parser.add_argument("km_path")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("lab_dir")
args = parser.parse_args()
logging.info(str(args))
dump_label(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/hubert/simple_kmeans/dump_km_label.py |
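# Editor's note: sanity check (not part of the original file) for the expanded
# squared-distance formula used in ApplyKmeans: ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 8))     # 5 frames, 8-dim features
C = rng.normal(size=(8, 3))     # 3 cluster centers, stored transposed as in ApplyKmeans
dist = (x ** 2).sum(1, keepdims=True) - 2 * x @ C + (C ** 2).sum(0, keepdims=True)
brute = ((x[:, None, :] - C.T[None, :, :]) ** 2).sum(-1)
assert np.allclose(dist, brute)
print(dist.argmin(axis=1))      # cluster id per frame, as in ApplyKmeans.__call__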
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import fairseq
import soundfile as sf
import torch
import torch.nn.functional as F
from feature_utils import get_path_iterator, dump_feature
from fairseq.data.audio.audio_utils import get_features_or_waveform
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_hubert_feature")
class HubertFeatureReader(object):
def __init__(self, ckpt_path, layer, max_chunk=1600000):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
self.model = model[0].eval().cuda()
self.task = task
self.layer = layer
self.max_chunk = max_chunk
logger.info(f"TASK CONFIG:\n{self.task.cfg}")
logger.info(f" max_chunk = {self.max_chunk}")
def read_audio(self, path, ref_len=None):
wav = get_features_or_waveform(path, need_waveform=True, use_sample_rate=self.task.cfg.sample_rate)
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len=ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
x = F.layer_norm(x, x.shape)
x = x.view(1, -1)
feat = []
for start in range(0, x.size(1), self.max_chunk):
x_chunk = x[:, start : start + self.max_chunk]
feat_chunk, _ = self.model.extract_features(
source=x_chunk,
padding_mask=None,
mask=False,
output_layer=self.layer,
)
feat.append(feat_chunk)
return torch.cat(feat, 1).squeeze(0)
def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk):
reader = HubertFeatureReader(ckpt_path, layer, max_chunk)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--max_chunk", type=int, default=1600000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py |
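# Editor's note: illustration (not part of the original file) of the chunking pattern in
# HubertFeatureReader.get_feats: long inputs are split along time and the per-chunk
# outputs are concatenated back, so nothing is lost or duplicated.
import torch

x = torch.arange(10).view(1, -1).float()
max_chunk = 4
chunks = [x[:, start: start + max_chunk] for start in range(0, x.size(1), max_chunk)]
print([c.shape[1] for c in chunks])        # [4, 4, 2]
print(torch.cat(chunks, dim=1).equal(x))   # True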
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import io
import logging
import os
import os.path as op
import sys
from dump_hubert_feature import HubertFeatureReader
from feature_utils import get_shard_range, dump_feature
from fairseq.data.audio.audio_utils import get_features_or_waveform
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_hubert_feature_s2t")
class HubertFeatureReaderS2T(HubertFeatureReader):
def read_audio(self, path, ref_len=None):
wav = get_features_or_waveform(
path, need_waveform=True, use_sample_rate=self.task.cfg.sample_rate
)
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_path_iterator(root, tsv, nshard, rank, audio_col_name):
with open(tsv) as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
subpaths = [op.join(root, e[audio_col_name]) for e in reader]
start, end = get_shard_range(len(subpaths), nshard, rank)
subpaths = subpaths[start:end]
def iterate():
for subpath in subpaths:
yield op.join(root, subpath), None
return iterate, len(subpaths)
def main(
root,
tsv_path,
ckpt_path,
layer,
nshard,
rank,
feat_dir,
split,
max_chunk,
audio_col_name,
):
reader = HubertFeatureReaderS2T(ckpt_path, layer, max_chunk)
generator, num = get_path_iterator(root, tsv_path, nshard, rank, audio_col_name)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("root")
parser.add_argument("tsv_path")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("split")
parser.add_argument("--audio_col_name", type=str, default="audio")
parser.add_argument("--max_chunk", type=int, default=1600000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature_s2t.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import tqdm
from npy_append_array import NpyAppendArray
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("feature_utils")
def get_shard_range(tot, nshard, rank):
assert rank < nshard and rank >= 0, f"invalid rank/nshard {rank}/{nshard}"
start = round(tot / nshard * rank)
end = round(tot / nshard * (rank + 1))
assert start < end, f"start={start}, end={end}"
logger.info(
f"rank {rank} of {nshard}, process {end-start} "
f"({start}-{end}) out of {tot}"
)
return start, end
def get_path_iterator(tsv, nshard, rank):
with open(tsv, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
start, end = get_shard_range(len(lines), nshard, rank)
lines = lines[start:end]
def iterate():
for line in lines:
subpath, nsample = line.split("\t")
yield f"{root}/{subpath}", int(nsample)
return iterate, len(lines)
def dump_feature(reader, generator, num, split, nshard, rank, feat_dir):
iterator = generator()
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
os.makedirs(feat_dir, exist_ok=True)
if os.path.exists(feat_path):
os.remove(feat_path)
feat_f = NpyAppendArray(feat_path)
with open(leng_path, "w") as leng_f:
for path, nsample in tqdm.tqdm(iterator, total=num):
feat = reader.get_feats(path, nsample)
feat_f.append(feat.cpu().numpy())
leng_f.write(f"{len(feat)}\n")
logger.info("finished successfully")
| EXA-1-master | exa/libraries/fairseq/examples/hubert/simple_kmeans/feature_utils.py |
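# Editor's note: illustration (not part of the original file) of how get_shard_range above
# splits N items across shards; ranks are 0-based and every item lands in exactly one shard.
def shard_range(tot, nshard, rank):   # mirrors get_shard_range, asserts/logging omitted
    start = round(tot / nshard * rank)
    end = round(tot / nshard * (rank + 1))
    return start, end

print([shard_range(10, 3, r) for r in range(3)])   # [(0, 3), (3, 7), (7, 10)]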
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
from sklearn.cluster import MiniBatchKMeans
import joblib
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("learn_kmeans")
def get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
):
return MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
verbose=1,
compute_labels=False,
tol=tol,
max_no_improvement=max_no_improvement,
init_size=None,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
)
def load_feature_shard(feat_dir, split, nshard, rank, percent):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
if percent < 0:
return np.load(feat_path, mmap_mode="r")
else:
nsample = int(np.ceil(len(lengs) * percent))
indices = np.random.choice(len(lengs), nsample, replace=False)
feat = np.load(feat_path, mmap_mode="r")
sampled_feat = np.concatenate(
[feat[offsets[i]: offsets[i] + lengs[i]] for i in indices], axis=0
)
logger.info(
(
f"sampled {nsample} utterances, {len(sampled_feat)} frames "
f"from shard {rank}/{nshard}"
)
)
return sampled_feat
def load_feature(feat_dir, split, nshard, seed, percent):
assert percent <= 1.0
feat = np.concatenate(
[
load_feature_shard(feat_dir, split, nshard, r, percent)
for r in range(nshard)
],
axis=0,
)
logging.info(f"loaded feature with dimension {feat.shape}")
return feat
def learn_kmeans(
feat_dir,
split,
nshard,
km_path,
n_clusters,
seed,
percent,
init,
max_iter,
batch_size,
tol,
n_init,
reassignment_ratio,
max_no_improvement,
):
np.random.seed(seed)
feat = load_feature(feat_dir, split, nshard, seed, percent)
km_model = get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
)
km_model.fit(feat)
joblib.dump(km_model, km_path)
inertia = -km_model.score(feat) / len(feat)
logger.info("total intertia: %.5f", inertia)
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir", type=str)
parser.add_argument("split", type=str)
parser.add_argument("nshard", type=int)
parser.add_argument("km_path", type=str)
parser.add_argument("n_clusters", type=int)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--percent", default=-1, type=float, help="sample a subset; -1 for all"
)
parser.add_argument("--init", default="k-means++")
parser.add_argument("--max_iter", default=100, type=int)
parser.add_argument("--batch_size", default=10000, type=int)
parser.add_argument("--tol", default=0.0, type=float)
parser.add_argument("--max_no_improvement", default=100, type=int)
parser.add_argument("--n_init", default=20, type=int)
parser.add_argument("--reassignment_ratio", default=0.0, type=float)
args = parser.parse_args()
logging.info(str(args))
learn_kmeans(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py |
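# Editor's note: a minimal end-to-end sketch (not part of the original file) of the
# learn/apply k-means loop above, on random features; the path and sizes are made up.
import joblib
import numpy as np
from sklearn.cluster import MiniBatchKMeans

feat = np.random.RandomState(0).randn(1000, 39).astype(np.float32)   # e.g. MFCC + deltas
km = MiniBatchKMeans(n_clusters=100, batch_size=256, compute_labels=False, n_init=5)
km.fit(feat)
joblib.dump(km, "toy_km.bin")
labels = joblib.load("toy_km.bin").predict(feat[:10])
print(labels)   # one cluster id per frame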
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import soundfile as sf
import torch
import torchaudio
from feature_utils import get_path_iterator, dump_feature
from fairseq.data.audio.audio_utils import get_features_or_waveform
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_mfcc_feature")
class MfccFeatureReader(object):
def __init__(self, sample_rate):
self.sample_rate = sample_rate
def read_audio(self, path, ref_len=None):
wav = get_features_or_waveform(path, need_waveform=True, use_sample_rate=self.sample_rate)
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len=ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float()
x = x.view(1, -1)
mfccs = torchaudio.compliance.kaldi.mfcc(
waveform=x,
sample_frequency=self.sample_rate,
use_energy=False,
) # (time, freq)
mfccs = mfccs.transpose(0, 1) # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
concat = concat.transpose(0, 1).contiguous()  # (time, freq)
return concat
def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate):
reader = MfccFeatureReader(sample_rate)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--sample_rate", type=int, default=16000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/hubert/simple_kmeans/dump_mfcc_feature.py |
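# Editor's note: shape check (not part of the original file) for the 39-dimensional
# MFCC + delta + delta-delta features produced by MfccFeatureReader, on a synthetic waveform.
import torch
import torchaudio

wav = torch.randn(1, 16000)   # one second of fake 16 kHz audio
mfcc = torchaudio.compliance.kaldi.mfcc(waveform=wav, sample_frequency=16000, use_energy=False)
mfcc = mfcc.transpose(0, 1)                               # (freq, time)
d = torchaudio.functional.compute_deltas(mfcc)
dd = torchaudio.functional.compute_deltas(d)
feat = torch.cat([mfcc, d, dd], dim=0).transpose(0, 1)    # (time, 39)
print(feat.shape)   # roughly (98, 39) for one second of audio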
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import fairseq
import soundfile as sf
import torch
import torch.nn.functional as F
from feature_utils import get_path_iterator, dump_feature
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_w2v2_feature")
class Wav2Vec2FeatureReader(object):
def __init__(self, ckpt_path, layer, max_chunk=1600000):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
self.model = model[0].eval().cuda()
self.task = task
self.layer = layer # assume this is 1-based like HuBERT
self.max_chunk = max_chunk
logger.info(f"TASK CONFIG:\n{self.task.cfg}")
logger.info(f" max_chunk = {self.max_chunk}")
logger.info(f" model:\n{self.model}")
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
assert sr == self.task.cfg.sample_rate, sr
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
x = F.layer_norm(x, x.shape)
x = x.view(1, -1)
feat = []
for start in range(0, x.size(1), self.max_chunk):
x_chunk = x[:, start: start + self.max_chunk]
res = self.model.extract_features(
source=x_chunk,
padding_mask=None,
mask=False,
layer=self.layer - 1,
)
feat_chunk = res["x"]
feat.append(feat_chunk)
return torch.cat(feat, 1).squeeze(0)
def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk):
reader = Wav2Vec2FeatureReader(ckpt_path, layer, max_chunk)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--max_chunk", type=int, default=1600000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def main():
"""
Create code file with the following format:
{'audio': 'file1', 'unitA': 'file1_chnl1_units', 'unitB': 'file1_chnl2_units'}
{'audio': 'file2', 'unitA': 'file2_chnl1_units', 'unitB': 'file2_chnl2_units'}
...
Given the input units files
- channel1_units_file:
file1|file1_chnl1_units
file2|file2_chnl1_units
...
- channel2_units_file:
file1|file1_chnl2_units
file2|file2_chnl2_units
...
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"channel1_units_file",
type=str,
help="Units of the first channel.",
)
parser.add_argument(
"channel2_units_file",
type=str,
help="Units of the second channel.",
)
parser.add_argument(
"output_file",
type=str,
help="Output file.",
)
parser.add_argument(
"--channels",
type=str,
default='unitA,unitB',
help="Comma-separated list of the channel names to create in the code"
"(Default: 'unitA,unitB').",
)
args = parser.parse_args()
channel_names = args.channels.split(',')
with open(args.channel1_units_file) as funit1, \
open(args.channel2_units_file) as funit2, \
open(args.output_file, 'w') as fout:
for line1, line2 in zip(funit1, funit2):
fname1, units1 = line1.strip().split('|')
fname2, units2 = line2.strip().split('|')
assert len(units1.split()) == len(units2.split()), \
f"Mismatch units length ({len(units1.split())} vs {len(units2.split())})"
base_fname1 = fname1[:-9]
base_fname2 = fname2[:-9]
assert base_fname1 == base_fname2, \
f"Mismatch filenames ({base_fname1} vs {base_fname2}). " \
f"Expected $filename-channel1 and $filename-channel2 in two files"
code = {
"audio" : base_fname1,
channel_names[0] : units1,
channel_names[1] : units2,
}
fout.write(str(code))
fout.write("\n")
print(f"Codes written to {args.output_file}")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/dgslm/create_code_file.py |
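# Editor's note: toy illustration (not part of the original file) of the line format that
# create_code_file.py consumes and produces; the filenames here are hypothetical.
line1 = "call_001-channel1|12 12 45 7"
line2 = "call_001-channel2|3 3 3 99"
fname1, units1 = line1.strip().split("|")
fname2, units2 = line2.strip().split("|")
assert fname1[:-9] == fname2[:-9]   # drop the 9-character "-channelN" suffix
code = {"audio": fname1[:-9], "unitA": units1, "unitB": units2}
print(str(code))   # one python-dict per line, matching the output format above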
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import json
from fairseq import utils
from fairseq.models.text_to_speech.vocoder import CodeHiFiGANVocoder
# from examples.hubert.simple_kmeans.dump_hubert_feature import HubertFeatureReader
from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import HubertFeatureReader
from examples.hubert.simple_kmeans.dump_km_label import ApplyKmeans
# Hubert tokenizer
class HubertTokenizer:
def __init__(
self,
hubert_path,
hubert_layer,
km_path,
use_cuda=True,
):
self.feature_extractor = HubertFeatureReader(hubert_path, hubert_layer, use_cuda=use_cuda)
self.quantizer = ApplyKmeans(km_path)
if not use_cuda:
self.quantizer.C = self.quantizer.C.cpu()
self.quantizer.Cnorm = self.quantizer.Cnorm.cpu()
def wav2code(self, path, channel_id=1):
feat = self.feature_extractor.get_feats(path, channel_id=channel_id)
code = self.quantizer(feat)
return ' '.join(map(str, code))
def wav2codes(self, path):
codes = [
self.wav2code(path, channel_id=1),
self.wav2code(path, channel_id=2)
]
return codes
# Vocoder
class HifiganVocoder:
def __init__(
self,
vocoder_path,
vocoder_cfg_path,
use_cuda=True,
):
with open(vocoder_cfg_path) as f:
cfg = json.load(f)
self.vocoder = CodeHiFiGANVocoder(vocoder_path, cfg).eval()
self.use_cuda = use_cuda
if self.use_cuda:
self.vocoder.cuda()
def code2wav(self, code, speaker_id=0, pred_dur=False):
if isinstance(code, str):
code = list(map(int, code.split()))
inp = {"code": torch.LongTensor(code).view(1, -1)}
if self.vocoder.model.multispkr:
inp["spkr"] = torch.LongTensor([speaker_id]).view(1, 1)
if self.use_cuda:
inp = utils.move_to_cuda(inp)
return self.vocoder(inp, pred_dur).detach().cpu().numpy()
def codes2wav(self, codes, speaker_ids=[0, 4], pred_dur=False):
if isinstance(codes, dict):
codes = list(codes.values())
assert len(codes) == 2
wav1 = self.code2wav(codes[0], speaker_ids[0], pred_dur)
wav2 = self.code2wav(codes[1], speaker_ids[1], pred_dur)
wav = np.stack([wav1, wav2])
return wav
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/dgslm/dgslm_utils.py |
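# Editor's note: hypothetical usage sketch (not part of the original file) for the two
# helpers above. All paths are placeholders, and the import assumes fairseq's examples/
# directory is importable; adapt both to your setup.
from examples.textless_nlp.dgslm.dgslm_utils import HubertTokenizer, HifiganVocoder

tokenizer = HubertTokenizer(
    hubert_path="/path/to/hubert_fisher.pt",     # placeholder checkpoint
    hubert_layer=12,
    km_path="/path/to/km_500.bin",               # placeholder k-means model
)
codes = tokenizer.wav2codes("/path/to/stereo_dialogue.wav")   # one unit string per channel

vocoder = HifiganVocoder(
    vocoder_path="/path/to/hifigan.pt",          # placeholder vocoder checkpoint
    vocoder_cfg_path="/path/to/config.json",
)
wav = vocoder.codes2wav(codes, speaker_ids=[0, 4])            # stacked (2, T) waveform
print(wav.shape)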
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import ast
import argparse
import logging
import torch
from fairseq import utils
from fairseq.models.speech_dlm import SpeechDLM
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def load_data(in_file):
with open(in_file) as f:
data = [ast.literal_eval(line.strip()) for line in f]
return data
def write_data(out_file, data):
with open(out_file, 'w') as f:
for d in data:
f.write(str(d))
f.write('\n')
def limit(codes, n):
new_codes = {}
for k, v in codes.items():
new_codes[k] = ' '.join(v.split()[:n])
return new_codes
def main(args):
logger.info(args)
use_cuda = torch.cuda.is_available()
# Load the data
data = load_data(args.in_file)
channels = args.channels.split(',')
unit_sequences = [{
channels[0]: d[channels[0]],
channels[1]: d[channels[1]],
} for d in data]
fnames = [d['audio'] for d in data]
print(f"Found {len(data)} sequences from {args.in_file}")
# Limit the prefix size
if args.prefix_size is not None:
print(f"Limit the prefix size to {args.prefix_size}")
unit_sequences = [limit(codes, args.prefix_size) for codes in unit_sequences]
# Load model from ckpt
print(f"Loading the SpeechDLM model from {args.ckpt}")
model = SpeechDLM.from_pretrained(
model_name_or_path=os.path.dirname(args.ckpt),
checkpoint_file=os.path.basename(args.ckpt),
data_name_or_path=args.data
)
model.eval()
if use_cuda:
model.cuda()
# Set batch sizes
model.cfg.dataset.max_tokens = args.batch_max_tokens
model.max_positions = args.batch_max_positions
if args.batch_max_sentences is not None:
model.cfg.dataset.batch_size = args.batch_max_sentences
# Set seed (if needed)
if args.seed is not None:
utils.set_torch_seed(args.seed)
# Sample from the SpeechDLM model
print(f"Generating {len(unit_sequences)} sequences with SpeechDLM model...\n"
f"Generation args: sampling={(not args.beam_search)}, "
f"sampling_topk={args.sampling_topk}, sampling_topp={args.sampling_topp}, "
f"beam={args.beam_size}, min_len={args.min_len}, "
f"max_len_a={args.max_len_a}, max_len_b={args.max_len_b}, "
f"temperature={args.temperature}, dur_temperature={args.dur_temperature}, "
f"seed={args.seed}")
generated_units = model.sample(
unit_sequences,
sampling=(not args.beam_search),
sampling_topk=args.sampling_topk,
sampling_topp=args.sampling_topp,
beam=args.beam_size,
max_len_a=args.max_len_a,
max_len_b=args.max_len_b,
min_len=args.min_len,
temperature=args.temperature,
duration_temperature=args.dur_temperature,
verbose=args.verbose,
skip_invalid_size_inputs=args.skip_invalid_size_batch,
)
# Create the generated sequences
generated_data = []
for fname, gen_units in zip(fnames, generated_units):
d = {
"audio" : fname+'-generated',
**gen_units
}
generated_data.append(d)
# Write the generated sequences
print(f"Write the generated units to {args.out_file}")
if args.out_file:
os.makedirs(os.path.dirname(args.out_file), exist_ok=True)
write_data(args.out_file, generated_data)
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--in-file",
type=str,
required=True,
help="Input file following the same format of the output from create_input.py",
)
parser.add_argument(
"--ckpt",
type=str,
required=True,
help="Path to the model checkpoint."
)
parser.add_argument(
"--data",
type=str,
required=True,
help="path to the model data dir (containing dict files)",
)
parser.add_argument(
"--out-file",
type=str,
required=True,
help="Path of the output file.",
)
parser.add_argument(
"--channels",
type=str,
default='unitA,unitB',
help="Comma-separated list of the channel names"
"(Default: 'unitA,unitB').",
)
parser.add_argument("--prefix-size", type=int, default=None,
help='Limit the prefix size')
# Batch sizes
parser.add_argument("--batch-max-tokens", type=int, default=9216,
help='maximum number of tokens considered in a batch')
parser.add_argument("--batch-max-positions", type=int, default=6144,
help='maximum number of tokens allowed for a sentence in a batch')
parser.add_argument("--batch-max-sentences", type=int, default=None,
help='maximum number of sentences considered in a batch')
parser.add_argument("--skip-invalid-size-batch", action='store_true',
help='skip sentences with more tokens than --batch-max-positions')
# Generation args
parser.add_argument("--beam-search", action='store_true',
help='perform beam search instead of sampling')
parser.add_argument("--beam-size", type=int, default=5,
help="beam width (used in both sampling and beam search mode) "
"(default: 5)")
parser.add_argument("--sampling-topk", type=int, default=-1,
help="only sample from top-k candidates (default: -1, non applied)")
parser.add_argument("--sampling-topp", type=float, default=-1.0,
help="only sample among the smallest set of elements whose cumulative "
"probability mass exceeds p (default: -1.0, non applied)")
parser.add_argument("--max-len-a", type=int, default=0,
help="generate sequences of maximum length ax + b, "
"where x is the source length (default: 0)")
parser.add_argument("--max-len-b", type=int, default=500,
help="generate sequences of maximum length ax + b, "
"where x is the source length (default: 500 ~ 10s)")
parser.add_argument("--min-len", type=int, default=1,
help="generate sequences of maximum length ax + b, "
"where x is the source length (default: 1)")
parser.add_argument("--temperature", type=float, default=1.0,
help="temperature when generating unit tokens (default: 1.0)")
parser.add_argument("--dur-temperature", type=float, default=1.0,
help="temperature when generating duration tokens (default: 1.0)")
parser.add_argument("--verbose", action='store_true',
help="print the scores given by the model to generated sequences")
parser.add_argument("--seed", type=int, default=123,
help="seed of the generation model")
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/dgslm/sample_speech_dlm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import argparse
import json
import logging
from pathlib import Path
import soundfile as sf
import torch
from tqdm import tqdm
from fairseq import utils
from fairseq.models.text_to_speech.vocoder import CodeHiFiGANVocoder
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def dump_result(args, data, sample_id, pred_wav):
assert "audio" in data or args.results_path is not None
if args.results_path:
fname = Path(data["audio"]).stem + ".wav" if "audio" in data else f"{sample_id}_pred.wav"
out_file = Path(args.results_path) / fname
sf.write(
out_file.as_posix(),
pred_wav.detach().cpu().numpy(),
args.sample_rate,
)
def load_data(in_file):
with open(in_file) as f:
data = [ast.literal_eval(line.strip()) for line in f]
return data
def load_vocoder(vocoder_path, vocoder_cfg_path, use_cuda=True):
with open(vocoder_cfg_path) as f:
cfg = json.load(f)
vocoder = CodeHiFiGANVocoder(vocoder_path, cfg).eval()
if use_cuda:
vocoder = vocoder.cuda()
return vocoder
def code2wav(vocoder, code, speaker_id, use_cuda=True):
if isinstance(code, str):
code = list(map(int, code.split()))
inp = dict()
inp["code"] = torch.LongTensor(code).view(1, -1)
if vocoder.model.multispkr:
inp["spkr"] = torch.LongTensor([speaker_id]).view(1, 1)
if use_cuda:
inp = utils.move_to_cuda(inp)
return vocoder(inp)
def main(args):
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
vocoder = load_vocoder(args.vocoder, args.vocoder_cfg, use_cuda)
data = load_data(args.in_file)
if args.results_path:
Path(args.results_path).mkdir(exist_ok=True, parents=True)
channels = args.channels.split(',')
speakers = [args.channel1_spk, args.channel2_spk]
for i, d in tqdm(enumerate(data), total=len(data)):
wavs = []
for key, speaker_id in zip(channels, speakers):
wav = code2wav(vocoder, d[key], speaker_id, use_cuda=use_cuda)
wavs.append(wav)
wav = torch.stack(wavs, dim=-1)
if args.mix:
wav = torch.mean(wav, dim=-1)
dump_result(args, d, i, wav)
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--in-file",
type=str,
required=True,
help="Input file following the same format of the output from create_input.py",
)
parser.add_argument(
"--vocoder", type=str, required=True, help="path to the vocoder"
)
parser.add_argument(
"--vocoder-cfg",
type=str,
required=True,
help="path to the vocoder config",
)
parser.add_argument(
"--channels",
type=str,
default='unitA,unitB',
help="Comma-separated list of the channel names"
"(Default: 'unitA,unitB').",
)
parser.add_argument("--sample-rate", type=int, default=16_000)
parser.add_argument(
"--results-path",
type=str,
default=None,
help="Output directory. If not set, the audios will be stored following the 'audio' field specified in the input file",
)
parser.add_argument("--channel1-spk", type=int, default=0, help="Speaker of the first channel",)
parser.add_argument("--channel2-spk", type=int, default=4, help="Speaker of the second channel",)
parser.add_argument("--mix", action="store_true", help="Mix the two channels to create output mono files")
parser.add_argument("--cpu", action="store_true", help="run on CPU")
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/dgslm/vocoder_hifigan/generate_stereo_waveform.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from functools import partial
import numpy as np
import torch
from tqdm import tqdm
from data_utils import dump_speaker_f0_stat, F0Stat, load_audio_path, load_f0
def load_speaker(path):
speakers = []
with open(path) as f:
for line in f.readlines():
sample = eval(line.strip())
assert "speaker" in sample
speakers.append(sample["speaker"])
return speakers
def quantize_f0(speaker_to_f0, f0_stats, nbins, normalize, log):
f0_all = []
for speaker, f0 in speaker_to_f0.items():
f0 = f0.raw_data
if log:
f0 = f0.log()
mean = f0_stats[speaker]["logf0_mean"] if log else f0_stats[speaker]["f0_mean"]
std = f0_stats[speaker]["logf0_std"] if log else f0_stats[speaker]["f0_std"]
if normalize == "mean":
f0 = f0 - mean
elif normalize == "meanstd":
f0 = (f0 - mean) / std
f0_all.extend(f0.tolist())
hist, bin_x = np.histogram(f0_all, 100000)
cum_hist = np.cumsum(hist) / len(f0_all) * 100
f0_bin = {}
for num_bin in nbins:
bin_offset = []
bin_size = 100 / num_bin
threshold = bin_size
for i in range(num_bin - 1):
index = (np.abs(cum_hist - threshold)).argmin()
bin_offset.append(bin_x[index])
threshold += bin_size
f0_bin[num_bin] = np.array(bin_offset)
return f0_bin
def main(file_path, f0_dir, out_dir, out_prefix, nbins, nshards, normalize, log):
audio_paths = load_audio_path(file_path)
path_to_f0 = load_f0(f0_dir, nshards)
speakers = load_speaker(file_path)
speaker_to_f0 = defaultdict(partial(F0Stat, True))
# speaker f0 stats
for audio_path, speaker in tqdm(zip(audio_paths, speakers)):
f0 = path_to_f0[audio_path]
speaker_to_f0[speaker].update(f0)
f0_stats = dump_speaker_f0_stat(speaker_to_f0, f"{out_dir}/{out_prefix}")
# quantize
f0_bin = quantize_f0(speaker_to_f0, f0_stats, nbins, normalize, log)
log_suffix = "_log" if log else ""
f0_bin_out_file = f"{out_dir}/{out_prefix}_{normalize}_norm{log_suffix}_f0_bin.th"
torch.save(f0_bin, f0_bin_out_file)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file_path")
parser.add_argument("f0_dir", help="out_dir from preprocess_f0")
parser.add_argument("out_dir")
parser.add_argument("out_prefix")
parser.add_argument("--nbins", nargs="+", type=int, default=[32])
parser.add_argument("--nshards", type=int, default=20, help="number of f0 shards")
parser.add_argument(
"--normalize", type=str, choices=["meanstd", "mean", "none"], default="mean"
)
parser.add_argument("--log", action="store_true")
args = parser.parse_args()
print(args)
main(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/quantize_f0.py |
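# Editor's note: standalone illustration (not part of the original file) of the
# percentile-based bin boundaries computed in quantize_f0 above, on synthetic values.
import numpy as np

values = np.random.RandomState(0).lognormal(mean=5.0, sigma=0.3, size=10_000)
hist, bin_x = np.histogram(values, 100000)
cum_hist = np.cumsum(hist) / len(values) * 100
num_bin = 4
bin_offset, threshold = [], 100 / num_bin
for _ in range(num_bin - 1):
    index = (np.abs(cum_hist - threshold)).argmin()
    bin_offset.append(bin_x[index])
    threshold += 100 / num_bin
print(np.array(bin_offset))   # 3 boundaries splitting the values into 4 roughly equal-mass bins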
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from multiprocessing import Pool
import os
from collections import defaultdict
from itertools import starmap
import torch
from npy_append_array import NpyAppendArray
from tqdm import tqdm
from data_utils import dump_speaker_f0_stat, F0Stat, load_f0
from fairseq.data.codedataset import (
ExpressiveCodeDataConfig,
parse_manifest,
F0_FRAME_SPACE,
align_f0_to_durations,
)
from fairseq.tasks.speech_ulm_task import UnitDictionary
def load_meta(meta_path, split):
config = ExpressiveCodeDataConfig(meta_path)
manifest_path = config.manifests[split]
dictionary = UnitDictionary(n_units=config.n_units)
audio_paths, codes, durs, speakers = parse_manifest(manifest_path, dictionary)
return config, audio_paths, codes, durs, speakers
def _align_f0(f0, dur, ratio, frm_tol=5):
if f0 is None:
seg_f0 = torch.zeros_like(dur, dtype=torch.float)
else:
seg_f0 = align_f0_to_durations(f0, dur, ratio, tol=frm_tol * ratio)
return seg_f0.numpy()  # a bit hacky: hand back a numpy array here
def align_f0(path_to_f0, audio_paths, durs, ratio, mp=False):
chunk_size = 2000
num_procs = 40
iterable = ((path_to_f0[p], d, ratio) for p, d in zip(audio_paths, durs))
seg_f0s = []
if mp:
with Pool(num_procs) as pool:
iterator = tqdm(
pool.istarmap(_align_f0, iterable, chunk_size),
desc="align f0",
total=len(durs),
)
for seg_f0 in iterator:
seg_f0s.append(torch.from_numpy(seg_f0).float())
else:
iterator = tqdm(starmap(_align_f0, iterable), desc="align f0", total=len(durs))
for seg_f0 in iterator:
seg_f0s.append(torch.from_numpy(seg_f0).float())
return seg_f0s
def prepare_seg_data(config, audio_paths, codes, durs, speakers, path_to_f0):
ratio = config.code_hop_size / (config.sampling_rate * F0_FRAME_SPACE)
seg_f0s = align_f0(path_to_f0, audio_paths, durs, ratio)
data = {
"codes": codes,
"duration": durs,
"f0": seg_f0s,
"speaker": speakers,
"path": audio_paths,
}
return data
def dump_seg_data(data, out_prefix):
key_targs = {
"codes": f"{out_prefix}.code.npy",
"duration": f"{out_prefix}.dur.npy",
"f0": f"{out_prefix}.f0.npy",
}
for key, targ in key_targs.items():
assert not os.path.exists(targ)
npaa = NpyAppendArray(targ)
for utt_data in tqdm(data[key], desc=f"dumping {key}"):
npaa.append(utt_data.numpy())
assert not os.path.exists(f"{out_prefix}.path.txt")
with open(f"{out_prefix}.path.txt", "w") as f:
for x in data["path"]:
f.write(f"{str(x)}\n")
assert not os.path.exists(f"{out_prefix}.leng.txt")
with open(f"{out_prefix}.leng.txt", "w") as f:
for x in data["codes"]:
f.write(f"{len(x)}\n")
assert not os.path.exists(f"{out_prefix}.speaker.txt")
with open(f"{out_prefix}.speaker.txt", "w") as f:
for x in data["speaker"]:
f.write(f"{str(x)}\n")
print(f"wrote to files with prefix {out_prefix}")
def main(meta_path, f0_dir, splits, nshards_list):
speaker_to_stat = defaultdict(F0Stat)
if len(nshards_list) == 1:
nshards_list = nshards_list * len(splits)
else:
assert len(nshards_list) == len(splits)
for split, nshards in zip(splits, nshards_list):
config, audio_paths, codes, durs, speakers = load_meta(meta_path, split)
path_to_f0 = load_f0(f"{f0_dir}/{split}", nshards)
# segment-level data
data = prepare_seg_data(config, audio_paths, codes, durs, speakers, path_to_f0)
dump_seg_data(data, config.manifests[split])
# speaker f0
for audio_path, speaker in tqdm(zip(audio_paths, speakers)):
f0 = path_to_f0[audio_path]
speaker_to_stat[speaker].update(f0)
dump_speaker_f0_stat(speaker_to_stat, config.manifests[split])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("meta_path")
parser.add_argument("f0_dir", help="out_dir from preprocess_f0")
parser.add_argument("--splits", nargs="+", default=["train", "valid"])
parser.add_argument(
"--nshards_list", type=int, nargs="+", default=[20], help="number of f0 shards"
)
args = parser.parse_args()
print(args)
main(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/prepare_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import warnings
def truncated_laplace(mean, T, truncate_by_zero=False):
"""Generating a sample from a Laplace distribution, possible left-truncated at zero.
A bit of explanation here https://stats.stackexchange.com/a/357598 .
"""
assert isinstance(mean, torch.Tensor)
if not truncate_by_zero:
percentile = 0.0
else:
if not (mean >= 0.0).all():
warnings.warn(f"means are supposed to be non-negative, but got {mean}")
mean = torch.clamp_min(mean, 0.0)
lower_bound = mean.new_tensor([0.0])
percentile = 0.5 + 0.5 * torch.sign(lower_bound - mean) * (
1.0 - torch.exp(-1.0 / T * torch.abs(mean - lower_bound))
)
p = torch.empty_like(mean).uniform_() * (1.0 - percentile) + percentile
return mean - T * torch.sign(p - 0.5) * torch.log(1 - 2 * torch.abs(p - 0.5))
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/truncated_laplace.py |
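# Editor's note: quick demonstration (not part of the original file) of truncated_laplace;
# with truncation enabled every sample stays non-negative. Assumes this file's directory
# is on sys.path.
import torch
from truncated_laplace import truncated_laplace

torch.manual_seed(0)
mean = torch.full((10_000,), 0.5)
plain = truncated_laplace(mean, T=1.0, truncate_by_zero=False)
trunc = truncated_laplace(mean, T=1.0, truncate_by_zero=True)
print((plain < 0).float().mean().item())   # roughly 0.3 of the samples are negative
print((trunc < 0).float().mean().item())   # 0.0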
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import argparse
import json
import logging
from pathlib import Path
import soundfile as sf
import torch
from tqdm import tqdm
from fairseq import utils
from fairseq.models.text_to_speech.vocoder import CodeHiFiGANVocoder
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def dump_result(args, data, sample_id, pred_wav):
assert "audio" in data or args.results_path is not None
if args.results_path:
fname = Path(data["audio"]).name if "audio" in data else f"{sample_id}_pred.wav"
out_file = Path(args.results_path) / fname
sf.write(
out_file.as_posix(),
pred_wav.detach().cpu().numpy(),
args.sample_rate,
)
def load_data(in_file):
with open(in_file) as f:
data = [ast.literal_eval(line.strip()) for line in f]
return data
def get_f0_upsample_ratio(code_hop_size, f_hop_size):
ratio = (code_hop_size // 160) // (f_hop_size // 256) * 2
return ratio
def main(args):
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
with open(args.vocoder_cfg) as f:
vocoder_cfg = json.load(f)
vocoder = CodeHiFiGANVocoder(args.vocoder, vocoder_cfg)
if use_cuda:
vocoder = vocoder.cuda()
data = load_data(args.in_file)
if args.results_path:
Path(args.results_path).mkdir(exist_ok=True, parents=True)
for i, d in tqdm(enumerate(data), total=len(data)):
code_key = "cpc_km100" if "cpc_km100" in d else "hubert"
code = list(map(int, d[code_key].split()))
x = {
"code": torch.LongTensor(code).view(1, -1),
"f0": torch.Tensor(d["f0"]).view(1, -1),
}
f0_up_ratio = get_f0_upsample_ratio(
vocoder_cfg["code_hop_size"], vocoder_cfg["hop_size"]
)
if f0_up_ratio > 1:
bsz, cond_length = x["f0"].size()
x["f0"] = x["f0"].unsqueeze(2).repeat(1, 1, f0_up_ratio).view(bsz, -1)
x = utils.move_to_cuda(x) if use_cuda else x
wav = vocoder(x)
dump_result(args, d, i, wav)
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--in-file",
type=str,
required=True,
help="Input file following the same format of the output from sample.py ('f0' and 'cpc_km100/hubert' are required fields)",
)
parser.add_argument(
"--vocoder", type=str, required=True, help="path to the vocoder"
)
parser.add_argument(
"--vocoder-cfg",
type=str,
required=True,
help="path to the vocoder config",
)
parser.add_argument("--sample-rate", type=int, default=16_000)
parser.add_argument(
"--results-path",
type=str,
default=None,
help="Output directory. If not set, the audios will be stored following the 'audio' field specified in the input file.",
)
parser.add_argument("--cpu", action="store_true", help="run on CPU")
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/generate_waveform.py |
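# Editor's note: worked example (not part of the original file) of get_f0_upsample_ratio
# and the repeat/reshape used above to stretch frame-level F0 onto the vocoder hop grid.
import torch

code_hop_size, f_hop_size = 320, 256
ratio = (code_hop_size // 160) // (f_hop_size // 256) * 2    # -> 4
f0 = torch.tensor([[100.0, 120.0]])
bsz, cond_length = f0.size()
f0_up = f0.unsqueeze(2).repeat(1, 1, ratio).view(bsz, -1)
print(ratio, f0_up)   # 4 tensor([[100., 100., 100., 100., 120., 120., 120., 120.]])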
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from tqdm import tqdm
from data_utils import load_audio_path
from fairseq.data.codedataset import get_f0_by_filename
def process_one(path, sr):
"""
Args:
path: audio file path
sr: sampling rate
"""
try:
# YAAPT throws errors in some rare cases
f0 = get_f0_by_filename(path, sr)
except Exception as e:
print(
f"WARNING: error when processing {path}. set f0 to zero. original error message:\n{e}"
)
f0 = None
return f0
def main(file_path, out_dir, nshards, rank, sampling_rate):
# load data
audio_paths = load_audio_path(file_path)
# shard
assert nshards <= len(audio_paths) and nshards > 0
shard_size = len(audio_paths) / nshards
s = int(round((rank - 1) * shard_size))
e = int(round(rank * shard_size))
audio_paths = audio_paths[s:e]
# process
path_to_f0 = {}
for i, audio_path in enumerate(tqdm(audio_paths)):
f0 = process_one(audio_path, sampling_rate)
path_to_f0[audio_path] = f0
print(f"finished processing {len(path_to_f0)} utterances ({s}-{e})")
f0_path = f"{out_dir}/f0_{rank}_{nshards}.pt"
os.makedirs(out_dir, exist_ok=True)
torch.save(path_to_f0, f0_path)
print(f"saved to {f0_path}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file_path")
parser.add_argument("out_dir")
parser.add_argument("--nshards", type=int, default=20)
parser.add_argument("--rank", type=int, default=1)
parser.add_argument("--sampling_rate", type=int, default=16000)
args = parser.parse_args()
main(**vars(args))
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/preprocess_f0.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import warnings
class Naive_F0_Decoder(torch.nn.Module):
def __init__(self, bounds_path, n_units=32):
super().__init__()
bounds = torch.load(bounds_path)
bounds = torch.from_numpy(bounds[n_units])
assert bounds.ndim == 1
pad = torch.tensor([-5.0, -5.0]) # bos, eos, pad are in the dictionary
centers = torch.cat(
[bounds[0:1], 0.5 * (bounds[1:] + bounds[:-1]), bounds[-1:], pad[:]]
)
self.embedding = torch.nn.Embedding.from_pretrained(
centers.unsqueeze(-1), freeze=True
)
self.max_n = self.embedding.weight.numel()
def forward(self, discrete_f0: torch.Tensor):
in_bounds = (0 <= discrete_f0).all() and (discrete_f0 < self.max_n).all()
if not in_bounds:
warnings.warn(
f"F0 contains some weird outputs: discrete_f0.max().item()={discrete_f0.max().item()} discrete_f0.min().item()={discrete_f0.min().item()}; "
f"while we have embeddings for {self.max_n} values. "
"Assuming this is a no-prosody model -- but be careful!"
)
mask = discrete_f0 >= self.max_n
discrete_f0 = discrete_f0.masked_fill(mask, self.max_n - 1)
return self.embedding(discrete_f0).squeeze(-1)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/naive_decoder.py |
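# Editor's note: illustration (not part of the original file) of how Naive_F0_Decoder
# turns n-1 bin boundaries into representative centers plus padding entries; toy values.
import torch

bounds = torch.tensor([90.0, 110.0, 140.0])   # 3 boundaries -> 4 quantization bins
pad = torch.tensor([-5.0, -5.0])
centers = torch.cat([bounds[0:1], 0.5 * (bounds[1:] + bounds[:-1]), bounds[-1:], pad])
print(centers)   # tensor([ 90., 100., 125., 140.,  -5.,  -5.])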
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from tqdm import tqdm
class Stat:
def __init__(self, keep_raw=False):
self.x = 0.0
self.x2 = 0.0
self.z = 0.0 # z = logx
self.z2 = 0.0
self.n = 0.0
self.u = 0.0
self.keep_raw = keep_raw
self.raw = []
def update(self, new_x):
new_z = new_x.log()
self.x += new_x.sum()
self.x2 += (new_x**2).sum()
self.z += new_z.sum()
self.z2 += (new_z**2).sum()
self.n += len(new_x)
self.u += 1
if self.keep_raw:
self.raw.append(new_x)
@property
def mean(self):
return self.x / self.n
@property
def std(self):
return (self.x2 / self.n - self.mean**2) ** 0.5
@property
def mean_log(self):
return self.z / self.n
@property
def std_log(self):
return (self.z2 / self.n - self.mean_log**2) ** 0.5
@property
def n_frms(self):
return self.n
@property
def n_utts(self):
return self.u
@property
def raw_data(self):
assert self.keep_raw, "does not support storing raw data!"
return torch.cat(self.raw)
class F0Stat(Stat):
def update(self, new_x):
# assume unvoiced frames are 0 and consider only voiced frames
if new_x is not None:
super().update(new_x[new_x != 0])
def dump_speaker_f0_stat(speaker_to_f0_stat, out_prefix):
path = f"{out_prefix}.f0_stat.pt"
assert not os.path.exists(path)
d = {
speaker: {
"f0_mean": speaker_to_f0_stat[speaker].mean,
"f0_std": speaker_to_f0_stat[speaker].std,
"logf0_mean": speaker_to_f0_stat[speaker].mean_log,
"logf0_std": speaker_to_f0_stat[speaker].std_log,
}
for speaker in speaker_to_f0_stat
}
torch.save(d, path)
return d
def load_audio_path(path):
audio_paths = []
with open(path) as f:
for line in f.readlines():
sample = eval(line.strip())
audio_paths.append(sample["audio"])
return audio_paths
def load_f0(f0_dir, nshards):
path_to_f0 = {}
for rank in tqdm(range(1, nshards + 1), desc=f"load f0"):
f0_shard_path = f"{f0_dir}/f0_{rank}_{nshards}.pt"
shard_path_to_f0 = torch.load(f0_shard_path)
path_to_f0.update(shard_path_to_f0)
return path_to_f0
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/data_utils.py |
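# Editor's note: small usage sketch (not part of the original file) showing how the
# streaming Stat/F0Stat accumulators above are fed utterance by utterance. Assumes this
# file's directory is on sys.path.
import torch
from data_utils import F0Stat

stat = F0Stat(keep_raw=False)
for _ in range(3):
    f0 = 100.0 + 100.0 * torch.rand(100)   # fake voiced F0; zeros would mark unvoiced frames
    stat.update(f0)
print(stat.n_utts, stat.n_frms, float(stat.mean), float(stat.std))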
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class InferenceDataset:
def __init__(
self,
dataset,
prefix,
only_prefix=True,
presort_by_length=True,
filter_short=False,
min_length=None,
):
self.dataset = dataset
self.collater = self.dataset.collater
self.prefix = prefix
self.only_prefix = only_prefix
self.filter_short = filter_short
self.remapping = list(range(len(self.dataset)))
if min_length:
assert min_length >= prefix + 1
length_thr = prefix + 1 if not min_length else min_length
if filter_short:
self.remapping = list(
filter(
lambda i: self.dataset[i]["dur_source"].sum() > length_thr,
self.remapping,
)
)
print(
f"# the initial dataset of {len(self.dataset)} examples became {len(self.remapping)} after filtering"
f" examples shorter than {length_thr} (in duration units)"
)
if presort_by_length:
lengths = {index: dataset.size(index) for index in self.remapping}
self.remapping.sort(key=lambda i: lengths[i])
@property
def pads(self):
return self.dataset.pads
def __len__(self):
return len(self.remapping)
def original_size(self, k):
k = self.remapping[k]
return self.dataset.size(k)
def __getitem__(self, k):
k = self.remapping[k]
channels = self.dataset[k]
if self.prefix and self.only_prefix:
dur_channel = channels["dur_source"]
assert dur_channel.sum() >= self.prefix
token_times = dur_channel.cumsum(dim=-1)
cut_after = torch.searchsorted(token_times, torch.tensor(self.prefix))
r = {}
for channel_name, value in channels.items():
if isinstance(value, torch.Tensor) and "source" in channel_name:
# if self.filter_short: assert value.size(0) >= self.prefix
r[channel_name] = value[: cut_after + 1]
else:
r[channel_name] = value
r["prefix"] = cut_after + 1
else:
r = channels
return r
def explode_batch(batch, times):
if times == 1:
return batch
new_batch = {}
for key, value in batch.items():
if isinstance(value, torch.Tensor):
assert value.size(0) == 1
new_batch[key] = torch.cat([value] * times)
elif key in ["ntokens", "nsentences"]:
new_batch[key] = value * times
elif key in ["prefix", "filename"]:
new_batch[key] = value
elif key == "net_input":
new_batch[key] = explode_batch(value, times)
else:
assert False, key
return new_batch
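# Minimal sketch (an addition, not original code) of what explode_batch does: every
# tensor in a batch-size-1 batch is repeated `times` along the batch dimension so
# that several continuations can be sampled from the same prompt in one forward
# pass. The toy batch below is an assumption for illustration only.
def _example_explode_batch():
    batch = {
        "net_input": {"src_tokens": torch.tensor([[5, 7, 9]])},
        "ntokens": 3,
        "prefix": 3,
    }
    exploded = explode_batch(batch, times=4)
    assert exploded["net_input"]["src_tokens"].shape == (4, 3)
    assert exploded["ntokens"] == 12
    return exploded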
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/inference_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import argparse
import pathlib
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--manifest", required=True)
parser.add_argument("--units", required=True)
parser.add_argument("--output", required=True)
parser.add_argument("--sample_rate", type=int, default=16_000)
args = parser.parse_args()
with open(args.manifest, "r") as manifest, open(args.units, "r") as units, open(
args.output, "w"
) as outp:
root = manifest.readline().strip()
root = pathlib.Path(root)
for manifest_line, unit_line in zip(manifest.readlines(), units.readlines()):
path, frames = manifest_line.split()
duration = int(frames) / float(args.sample_rate)
fname = root / path
speaker = fname.parent.parent.name
units = unit_line.split("|")[1]
print(
json.dumps(
dict(
audio=str(root / path),
duration=duration,
hubert_km100=units.strip(),
speaker=speaker,
)
),
file=outp,
)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/scripts/join_units_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/sample/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch.multiprocessing as mp
import numpy as np
import json
import torch
from torch.distributions.categorical import Categorical
from fairseq import checkpoint_utils, options, utils
from fairseq.data.codedataset import CodeDataset, ExpressiveCodeDataConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from torch.utils.data import DataLoader, DistributedSampler
from fairseq.utils import move_to_cuda
import tqdm
import random
import pathlib
import sys, pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent))
from inference_dataset import InferenceDataset, explode_batch
from naive_decoder import Naive_F0_Decoder
from truncated_laplace import truncated_laplace
CODETYPE_TO_FRAMETIME = {"cpc_km100": 0.01, "hubert": 0.02}  # 10 ms and 20 ms per frame, respectively
class TemperatureDecoder:
def __init__(self, Ts, discrete_dur=False, discrete_f0=False):
self.T_token, self.T_dur, self.T_f0 = Ts
self.discrete_dur = discrete_dur
self.discrete_f0 = discrete_f0
def __call__(self, output):
def sample_multinomial(key, T):
logits = output[key][:, -1, :].float()
return Categorical(logits=logits / T).sample().unsqueeze(-1)
def sample_laplace(key, T, truncate_at_zero):
mean = output[key][:, -1, :].float()
return truncated_laplace(mean=mean, T=T, truncate_by_zero=truncate_at_zero)
if self.T_token > 0:
new_tokens = sample_multinomial("token", self.T_token)
else:
new_tokens = output["token"][:, -1, :].argmax(dim=-1, keepdim=True)
if not self.discrete_dur and self.T_dur == 0:
new_durations = output["duration"][:, -1].round().int()
elif not self.discrete_dur and self.T_dur > 0:
new_durations = (
sample_laplace("duration", self.T_dur, truncate_at_zero=True)
.round()
.int()
)
elif self.discrete_dur and self.T_dur > 0:
new_durations = sample_multinomial("duration", self.T_dur)
elif self.discrete_dur and self.T_dur == 0:
new_durations = output["duration"][:, -1, :].argmax(dim=-1, keepdim=True)
else:
assert False
if not self.discrete_f0 and self.T_f0 == 0:
new_f0 = output["f0"][:, -1]
elif not self.discrete_f0 and self.T_f0 > 0:
new_f0 = sample_laplace("f0", self.T_f0, truncate_at_zero=False)
elif self.discrete_f0 and self.T_f0 > 0:
new_f0 = sample_multinomial("f0", self.T_f0)
elif self.discrete_f0 and self.T_f0 == 0:
new_f0 = output["f0"][:, -1, :].argmax(dim=-1, keepdim=True)
else:
assert False
return new_tokens, new_durations, new_f0
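# Hedged usage sketch (an addition): TemperatureDecoder consumes the per-step model
# output dict and returns the next (token, duration, f0) triple. The temperatures and
# tensor shapes below are assumptions chosen only to keep the example self-contained.
def _example_temperature_decoder():
    decoder = TemperatureDecoder(Ts=(0.7, 0.0, 0.0), discrete_dur=False, discrete_f0=False)
    fake_output = {
        "token": torch.randn(1, 1, 100),  # (batch, time, vocab)
        "duration": torch.full((1, 1, 1), 2.4),
        "f0": torch.full((1, 1, 1), 120.0),
    }
    new_tokens, new_durations, new_f0 = decoder(fake_output)
    # with T_dur == 0 the continuous duration prediction is simply rounded
    assert new_durations.item() == 2
    return new_tokens, new_durations, new_f0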
class FilterNamesDataset:
def __init__(self, dataset, fnames_path):
self.dataset = dataset
with open(fnames_path, "r") as fin:
fnames = set((eval(line)["audio"] for line in fin))
print(f"# will retrict the dataset for {len(fnames)} files")
self.indexes = []
for i, datapoint in enumerate(dataset):
if datapoint["filename"] in fnames:
self.indexes.append(i)
assert len(self.indexes) == len(fnames), f"{len(self.indexes)} {len(fnames)}"
self.collater = self.dataset.collater
self.discrete_dur = self.dataset.discrete_dur
self.discrete_f0 = self.dataset.discrete_f0
def __len__(self):
return len(self.indexes)
def __getitem__(self, k):
k = self.indexes[k]
return self.dataset[k]
def size(self, k):
k = self.indexes[k]
return self.dataset.size(k)
@torch.no_grad()
def do_sampling(
model,
batch,
eos_token,
decoder,
autoregressive_steps=100,
teacher_force_tokens=False,
teacher_force_duration=False,
teacher_force_f0=False,
match_duration=False,
):
def autoregressive_step_(output, autoregressive_steps):
new_tokens, new_durations, new_f0 = decoder(output)
n = output["token"].size(1) if output["token"].ndim == 3 else 1
if teacher_force_tokens:
new_tokens = batch["target"][:, n - 1].unsqueeze(-1)
if teacher_force_duration:
new_durations = batch["dur_target"][:, n - 1].unsqueeze(-1)
if teacher_force_f0:
new_f0 = batch["f0_target"][:, n - 1].unsqueeze(-1)
batch["net_input"]["src_tokens"] = torch.cat(
[batch["net_input"]["src_tokens"], new_tokens], dim=1
)
batch["net_input"]["dur_src"] = torch.cat(
[batch["net_input"]["dur_src"], new_durations], dim=1
)
batch["net_input"]["f0_src"] = torch.cat(
[batch["net_input"]["f0_src"], new_f0], dim=1
)
outputs = []
if teacher_force_tokens or teacher_force_duration or teacher_force_f0:
max_time = batch["target"].size(1)
prefix_time = batch["net_input"]["src_tokens"].size(1)
autoregressive_steps = max_time - prefix_time + 1 # should be 0
for _ in range(autoregressive_steps):
output = model(**batch["net_input"])
last_steps = (
output["token"][:, -1, ...],
output["duration"][:, -1, ...],
output["f0"][:, -1, ...],
)
outputs.append(last_steps)
autoregressive_step_(output, autoregressive_steps)
tokens, duration, f0 = (
batch["net_input"]["src_tokens"],
batch["net_input"]["dur_src"],
batch["net_input"]["f0_src"],
)
if (
match_duration
and (batch["dur_target"].sum(dim=-1) < duration.sum(dim=-1)).all()
):
break
return tokens, duration, f0, outputs
def unroll_duration(token_stream, duration_stream):
assert len(token_stream) == len(
duration_stream
), f"{len(token_stream)} != {len(duration_stream)}"
non_positive_durations = sum(d <= 0 for d in duration_stream)
if non_positive_durations > 0:
print(
f"# {non_positive_durations} durations are non-positive, they will be capped to 1"
)
result = []
duration_stream_rounded_capped = [max(1, int(round(x))) for x in duration_stream]
for t, d in zip(token_stream, duration_stream_rounded_capped):
result.extend([t] * d)
return result
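# Small sketch (an addition): unroll_duration repeats each token according to its
# (rounded, >= 1) duration, turning a token-level stream into a frame-level one.
def _example_unroll_duration():
    frames = unroll_duration([11, 42], [2, 3])
    assert frames == [11, 11, 42, 42, 42]
    return frames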
def realign_shifted_streams(tokens, durations, F0s, shifts):
"""
Durations are shifted by 1, F0 by 2
>>> tokens = ["<s>", "t1", "t2", "t3", "</s>", "x", "x"]
>>> durations = ["<0>", "<0>", "d1", "d2", "d3", "<0>", "x"]
>>> F0s = ["<0>", "<0>", "<0>", "f1", "f2", "f3", "<0>"]
>>> shifts = [1,2]
>>> realign_shifted_streams(tokens, durations, F0s, shifts)
(['<s>', 't1', 't2', 't3', '</s>'], ['<0>', 'd1', 'd2', 'd3', '<0>'], ['<0>', 'f1', 'f2', 'f3', '<0>'])
"""
max_shift = max(shifts)
if max_shift > 0:
shift_durations, shift_F0s = shifts
tokens = tokens[:-max_shift]
durations = durations[shift_durations:]
if shift_durations < max_shift:
durations = durations[: -(max_shift - shift_durations)]
if F0s is not None:
F0s = F0s[shift_F0s:]
if shift_F0s < max_shift:
F0s = F0s[: -(max_shift - shift_F0s)]
    assert len(tokens) == len(durations), f"{len(tokens)} != {len(durations)}"
    if F0s is not None:
        assert len(tokens) == len(F0s), f"{len(tokens)} != {len(F0s)}"
return tokens, durations, F0s
def maybe_cut_eos(produced_tokens, produced_duration, produced_f0, eos_idx):
if eos_idx in produced_tokens:
eos_index = produced_tokens.index(eos_idx)
produced_tokens = produced_tokens[:eos_index]
produced_duration = produced_duration[:eos_index]
produced_f0 = produced_f0[:eos_index]
return produced_tokens, produced_duration, produced_f0
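# Hedged sketch (an addition): maybe_cut_eos truncates all three streams at the first
# EOS token; eos_idx=2 is an assumed example id, not the real dictionary value.
def _example_maybe_cut_eos():
    tokens, durations, f0s = maybe_cut_eos(
        [5, 6, 2, 7], [1, 2, 3, 4], [9.0, 8.0, 7.0, 6.0], eos_idx=2
    )
    assert tokens == [5, 6] and durations == [1, 2] and f0s == [9.0, 8.0]
    return tokens, durations, f0s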
def maybe_filter_pad(produced_tokens, produced_duration, produced_f0, pad_idx):
if pad_idx not in produced_tokens:
return produced_tokens, produced_duration, produced_f0
assert len(produced_tokens) == len(produced_duration) == len(produced_f0)
print("<pad> is detected in the output!")
filtered_tokens, filtered_duration, filtered_f0 = [], [], []
for t, d, f in zip(produced_tokens, produced_duration, produced_f0):
if t != pad_idx:
filtered_tokens.append(t)
filtered_duration.append(d)
filtered_f0.append(f)
return filtered_tokens, filtered_duration, filtered_f0
def match_duration(produced_tokens, produced_duration, produced_f0, target_duration):
"""
>>> tokens = ['t'] * 4
>>> F0s = ['f0'] * 4
>>> produced_duration = [1, 10, 10, 10]
>>> match_duration(tokens, produced_duration, F0s, target_duration=100)
(['t', 't', 't', 't'], [1, 10, 10, 10], ['f0', 'f0', 'f0', 'f0'])
>>> match_duration(tokens, produced_duration, F0s, target_duration=5)
(['t', 't'], [1, 4], ['f0', 'f0'])
"""
if sum(produced_duration) <= target_duration:
return produced_tokens, produced_duration, produced_f0
running_duration = 0
filtered_duration = []
for next_tok_duration in produced_duration:
if running_duration + next_tok_duration < target_duration:
filtered_duration.append(next_tok_duration)
running_duration += next_tok_duration
else:
to_add = target_duration - running_duration
assert to_add <= next_tok_duration
filtered_duration.append(to_add)
break
produced_duration = filtered_duration
assert sum(produced_duration) == target_duration
n_tok = len(filtered_duration)
return produced_tokens[:n_tok], produced_duration, produced_f0[:n_tok]
def main(rank, world_size, args):
if world_size > 1:
torch.distributed.init_process_group(
backend="gloo", init_method="env://", world_size=world_size, rank=rank
)
torch.cuda.set_device(rank)
raw_args = args
args = convert_namespace_to_omegaconf(args)
if args.common.seed is not None:
random.seed(args.common.seed)
np.random.seed(args.common.seed)
utils.set_torch_seed(args.common.seed)
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[raw_args.path], arg_overrides={"data": args.task.data}
)
tgt_dict = task.target_dictionary
for model in models:
model.prepare_for_inference_(args)
model.cuda().eval()
if raw_args.fp16:
model = model.half()
model = models[0]
config = ExpressiveCodeDataConfig(args.task.data)
dataset = CodeDataset(
manifest=config.manifests[raw_args.subset],
dictionary=task.source_dictionary,
dur_dictionary=task.source_duration_dictionary,
f0_dictionary=task.source_f0_dictionary,
config=config,
discrete_dur=task.cfg.discrete_duration,
discrete_f0=task.cfg.discrete_f0,
log_f0=task.cfg.log_f0,
normalize_f0_mean=task.cfg.normalize_f0_mean,
normalize_f0_std=task.cfg.normalize_f0_std,
interpolate_f0=task.cfg.interpolate_f0,
shifts=task.cfg.stream_shifts,
return_filename=True,
strip_filename=False,
)
tgt_dict = task.target_dictionary
shifts = dataset.shifts.dur, dataset.shifts.f0
max_shift = max(shifts)
fname = raw_args.output
if world_size > 1:
fname += f"_{rank}"
output_file = open(fname, "w")
if raw_args.filter_names:
dataset = FilterNamesDataset(dataset, raw_args.filter_names)
dataset = InferenceDataset(dataset, raw_args.prefix_length, filter_short=True)
print(f"Dataset size {len(dataset)}")
sampler = (
None
if world_size == 1
else DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=False
)
)
dataloader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
collate_fn=dataset.collater,
sampler=sampler,
)
Ts = raw_args.T_token, raw_args.T_duration, raw_args.T_f0
decoder = TemperatureDecoder(
Ts, discrete_dur=task.cfg.discrete_duration, discrete_f0=task.cfg.discrete_f0
)
dataset_size = len(dataset)
f0_decoder = None
if raw_args.f0_discretization_bounds:
assert task.cfg.discrete_f0
f0_decoder = Naive_F0_Decoder(raw_args.f0_discretization_bounds).cuda()
pbar = (
tqdm.tqdm(
total=dataset_size
if raw_args.max_samples is None
else min(raw_args.max_samples, dataset_size)
)
if world_size == 1
else None
)
samples_produced = 0
for batch in dataloader:
if (
raw_args.max_samples is not None
and samples_produced >= raw_args.max_samples
):
break
prefix = batch["prefix"][0]
batch = explode_batch(batch, raw_args.batch_explosion_rate)
batch = move_to_cuda(batch)
if not raw_args.short_curcuit:
produced_tokens, produced_durations, produced_f0, _ = do_sampling(
models[0],
batch,
tgt_dict.eos(),
decoder,
autoregressive_steps=raw_args.max_length - prefix + max_shift,
teacher_force_tokens=raw_args.teacher_force_tokens,
match_duration=raw_args.match_duration,
teacher_force_duration=raw_args.teacher_force_duration,
teacher_force_f0=raw_args.teacher_force_f0,
)
            # strip entries corresponding to <s>
produced_tokens = produced_tokens[:, 1:]
produced_durations = produced_durations[:, 1:]
produced_f0 = produced_f0[:, 1:]
else:
max_length = raw_args.max_length + max_shift
produced_tokens, produced_durations, produced_f0 = (
batch["target"][:, :max_length],
batch["dur_target"][:, :max_length],
batch["f0_target"][:, :max_length],
)
if f0_decoder is not None:
produced_f0 = f0_decoder(produced_f0)
produced_tokens, produced_durations, produced_f0 = (
produced_tokens.cpu().tolist(),
produced_durations.cpu().tolist(),
produced_f0.cpu().tolist(),
)
bsz = batch["target"].size(0)
assert bsz == raw_args.batch_explosion_rate
for i in range(bsz):
if (
raw_args.max_samples is not None
and samples_produced >= raw_args.max_samples
):
break
produced_tokens_i = produced_tokens[i]
produced_durations_i = produced_durations[i]
produced_f0_i = produced_f0[i]
(
produced_tokens_i,
produced_durations_i,
produced_f0_i,
) = realign_shifted_streams(
produced_tokens_i, produced_durations_i, produced_f0_i, shifts
)
produced_tokens_i, produced_durations_i, produced_f0_i = maybe_cut_eos(
produced_tokens_i, produced_durations_i, produced_f0_i, tgt_dict.eos()
)
produced_tokens_i, produced_durations_i, produced_f0_i = maybe_filter_pad(
produced_tokens_i, produced_durations_i, produced_f0_i, tgt_dict.pad()
)
if raw_args.match_duration:
                # NB: here we cheat a bit and rely on the fact that padding has
                # duration 0, so there is no need to re-align and remove padding
dur_target_i = batch["dur_target"][i, :].sum().item()
produced_tokens_i, produced_durations_i, produced_f0_i = match_duration(
produced_tokens_i, produced_durations_i, produced_f0_i, dur_target_i
)
if raw_args.cut_prompt:
produced_tokens_i, produced_durations_i, produced_f0_i = (
produced_tokens_i[prefix:],
produced_durations_i[prefix:],
produced_f0_i[prefix:],
)
prompt_fname = batch["filename"][0]
fname = str(pathlib.Path(prompt_fname).with_suffix("")) + f"__{i}.wav"
token_stream = unroll_duration(produced_tokens_i, produced_durations_i)
f0_stream = unroll_duration(produced_f0_i, produced_durations_i)
output_line = json.dumps(
{
"audio": fname,
"prompt": prompt_fname,
raw_args.code_type: " ".join(map(str, token_stream)),
"duration": round(
sum(produced_durations_i)
* CODETYPE_TO_FRAMETIME[raw_args.code_type],
3,
),
"raw_duration": produced_durations_i,
"raw_f0": produced_f0_i,
"f0": [round(f0, 3) for f0 in f0_stream],
}
)
print(output_line, file=output_file)
if pbar:
pbar.update(1)
samples_produced += 1
if raw_args.debug:
break
output_file.close()
if world_size > 1:
# important that everything is flushed before aggregating
torch.distributed.barrier()
if world_size > 1 and rank == 0:
with open(raw_args.output, "w") as fout:
for i in range(world_size):
f = raw_args.output + f"_{i}"
with open(f, "r") as fin:
fout.write(fin.read())
os.remove(f)
def cli_main():
parser = options.get_interactive_generation_parser()
parser.add_argument(
"--prefix-length",
type=int,
default=1,
help="Prompt prefix length (including <s>)",
)
parser.add_argument("--output", type=str, default=None, required=True)
parser.add_argument(
"--debug", action="store_true", help="Process only the first batch"
)
parser.add_argument(
"--ignore-durations",
action="store_true",
help="If set, the duration stream is ignored",
)
parser.add_argument(
"--max-length", type=int, default=200, help="Maximal produced length"
)
parser.add_argument(
"--code-type", choices=["cpc_km100", "hubert"], default="cpc_km100"
)
parser.add_argument("--max-samples", type=int, default=None)
parser.add_argument("--prompt-duration-scaler", type=float, default=1.0)
parser.add_argument("--teacher-force-tokens", action="store_true", default=False)
parser.add_argument("--teacher-force-duration", action="store_true", default=False)
parser.add_argument("--teacher-force-f0", action="store_true", default=False)
parser.add_argument("--filter-names", type=str, default=None)
parser.add_argument(
"--match-duration",
action="store_true",
help="Do not produce sequences longer that ground-truth",
)
parser.add_argument(
"--cut-prompt",
action="store_true",
help="Remove prompt from the produced audio",
)
parser.add_argument(
"--short-curcuit", action="store_true", help="Use 'target' as a sample"
)
parser.add_argument("--f0-discretization-bounds", type=str, default=None)
parser.add_argument("--batch-explosion-rate", type=int, default=1)
parser.add_argument("--T-token", type=float, default=1.0)
parser.add_argument("--T-duration", type=float, default=1.0)
parser.add_argument("--T-f0", type=float, default=1.0)
parser.add_argument(
"--subset", type=str, default="valid", choices=["test", "valid"]
)
args = options.parse_args_and_arch(parser)
assert (
args.prefix_length >= 1
), "Prefix length includes bos token <s>, hence the minimum is 1."
assert all(
t >= 0 for t in [args.T_token, args.T_f0, args.T_duration]
), "T must be non-negative!"
world_size = torch.cuda.device_count()
if world_size > 1:
import random
mp.set_start_method("spawn", force=True)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(random.randint(10_000, 50_000))
print(f"Using {world_size} devices, master port {os.environ['MASTER_PORT']}")
mp.spawn(
main,
nprocs=world_size,
args=(
world_size,
args,
),
join=True,
)
else:
main(rank=0, world_size=world_size, args=args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/sample/sample.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import scipy
import torch
import torch.multiprocessing as mp
from fairseq import checkpoint_utils, options
from fairseq.data.codedataset import CodeDataset, ExpressiveCodeDataConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from torch.utils.data import DataLoader, DistributedSampler
from fairseq.utils import move_to_cuda
from fairseq import utils
from fairseq.criterions.speech_ulm_criterion import nll_loss, mae_loss
import time
from types import SimpleNamespace
import sys, pathlib
sys.path.append(str(pathlib.Path(__file__).parent.parent.resolve()))
from naive_decoder import Naive_F0_Decoder
from inference_dataset import InferenceDataset, explode_batch
from sample.sample import do_sampling, TemperatureDecoder, FilterNamesDataset
try:
from nltk.translate.bleu_score import sentence_bleu
except ImportError:
print("Please install nltk: `pip install --user -U nltk`")
raise
@torch.no_grad()
def teacher_force_everything(
args, dataset, model, criterion, tgt_dict, rank, world_size
):
prefix = args.prefix_length
f0_decoder = None
if args.dequantize_prosody:
assert dataset.discrete_f0
print("Reporting MAE for a discrete model")
f0_decoder = Naive_F0_Decoder(
args.f0_discretization_bounds, dataset.config.f0_vq_n_units
).cuda()
dataset = InferenceDataset(
dataset,
prefix=args.prefix_length,
only_prefix=False,
filter_short=True,
presort_by_length=True,
)
sampler = (
None
if world_size == 1
else DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=False
)
)
dataloader = DataLoader(
dataset,
args.batch_size,
shuffle=False,
collate_fn=dataset.collater,
sampler=sampler,
)
total_token_loss, total_duration_loss, total_f0_loss, total_tokens = (
0.0,
0.0,
0.0,
0.0,
)
i = 0
for batch in dataloader:
i += 1
batch = move_to_cuda(batch)
output = model(**batch["net_input"])
tokens, durations, f0 = output["token"], output["duration"], output["f0"]
durations, f0 = durations.squeeze(), f0.squeeze()
token_loss = nll_loss(
tokens[:, prefix - 1 :],
batch["target"][:, prefix - 1 :].contiguous(),
batch["mask"][:, prefix - 1 :].contiguous(),
reduce=True,
)
if args.dequantize_prosody:
durations = durations.argmax(dim=-1)
duration_loss = mae_loss(
durations[:, prefix - 1 :].contiguous().float(),
batch["dur_target"][:, prefix - 1 :].contiguous().float(),
batch["dur_mask"][:, prefix - 1 :].contiguous(),
reduce=True,
)
else:
duration_loss = criterion.dur_loss_fn(
durations[:, prefix - 1 :].contiguous(),
batch["dur_target"][:, prefix - 1 :].contiguous(),
batch["dur_mask"][:, prefix - 1 :].contiguous(),
reduce=True,
)
if f0_decoder:
f0 = f0.argmax(dim=-1)
f0 = f0_decoder(f0).squeeze(-1)
f0_target = batch["raw_f0"]
f0_loss = mae_loss(
f0[:, prefix - 1 :].contiguous(),
f0_target[:, prefix - 1 :].contiguous(),
batch["f0_mask"][:, prefix - 1 :].contiguous(),
reduce=True,
)
else:
f0_loss = criterion.f0_loss_fn(
f0[:, prefix - 1 :].contiguous(),
batch["f0_target"][:, prefix - 1 :].contiguous(),
batch["f0_mask"][:, prefix - 1 :].contiguous(),
reduce=True,
)
n_tokens = (~batch["dur_mask"])[:, prefix - 1 :].sum()
total_token_loss += token_loss.item()
total_duration_loss += duration_loss.item()
total_f0_loss += f0_loss.item()
total_tokens += n_tokens.item()
if args.debug and i > 5:
break
values = torch.tensor([total_token_loss, total_duration_loss, total_f0_loss])
normalizers = torch.tensor([total_tokens for _ in range(3)])
return values, normalizers
def get_bleu(produced_tokens, target_tokens, tgt_dict):
assert target_tokens.ndim == 1
assert produced_tokens.size(1) == target_tokens.size(0)
# we can have padding due to shifted channels
shift = 0
for token in reversed(target_tokens.cpu().tolist()):
if token in [tgt_dict.pad(), tgt_dict.eos()]:
shift += 1
else:
break
target_tokens = target_tokens[:-shift]
produced_tokens = produced_tokens[:, :-shift]
string_target = tgt_dict.string(target_tokens).split()
string_candidates = [
tgt_dict.string(produced_tokens[i, :]).split()
for i in range(produced_tokens.size(0))
]
bleu3 = sentence_bleu(
references=string_candidates,
hypothesis=string_target,
weights=(1.0 / 3, 1.0 / 3, 1.0 / 3),
)
return bleu3
@torch.no_grad()
def continuation(args, dataset, model, criterion, tgt_dict, rank, world_size):
is_discrete_duration = dataset.discrete_dur
is_discrete_f0 = dataset.discrete_f0
f0_decoder = None
if args.dequantize_prosody:
assert dataset.discrete_f0
print("Reporting MAE F0 for a discrete model")
f0_decoder = Naive_F0_Decoder(
args.f0_discretization_bounds, dataset.config.f0_vq_n_units
).cuda()
dataset = InferenceDataset(
dataset, args.prefix_length, filter_short=True, presort_by_length=True
)
sampler = (
None
if world_size == 1
else DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=False
)
)
dataloader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
collate_fn=dataset.collater,
sampler=sampler,
)
Ts = args.T_token, args.T_duration, args.T_f0
decoder = TemperatureDecoder(
Ts, discrete_dur=is_discrete_duration, discrete_f0=is_discrete_f0
)
running_stats = SimpleNamespace(
token_bleu=0.0,
duration_nll=0.0,
duration_mae=0.0,
f0_nll=0.0,
f0_mae=0.0,
n_tokens=0.0,
n_sentences=0.0,
f0_sum=0.0,
f0_sum_sq=0.0,
dur_sum=0.0,
dur_sum_sq=0.0,
)
for i, batch in enumerate(dataloader):
batch = explode_batch(batch, args.batch_explosion_rate)
bsz = batch["target"].size(0)
batch = move_to_cuda(batch)
prefix = batch["prefix"][0]
max_length_to_unroll = batch["target"].size(1)
prefix_length = batch["net_input"]["src_tokens"].size(1)
steps = max_length_to_unroll - prefix_length + 1
assert steps > 0
produced_tokens, produced_durations, produced_f0, outputs = do_sampling(
model,
batch,
tgt_dict.eos(),
decoder,
autoregressive_steps=steps,
teacher_force_tokens=args.teacher_force_tokens,
teacher_force_duration=args.teacher_force_duration,
teacher_force_f0=args.teacher_force_f0,
)
if args.teacher_force_tokens:
assert (produced_tokens[:, 1:] == batch["target"]).all()
if args.teacher_force_duration:
assert (produced_durations[:, 1:] == batch["dur_target"]).all()
if args.teacher_force_f0:
assert (produced_f0[:, 1:] == batch["f0_target"]).all()
dur_target = batch["dur_target"][:, prefix - 1 :].contiguous()
f0_target = batch["f0_target"][:, prefix - 1 :].contiguous()
f0_mask = batch["f0_mask"][:, prefix - 1 :].contiguous()
dur_mask = batch["dur_mask"][:, prefix - 1 :].contiguous()
duration_mae = mae_loss(
produced_durations[:, prefix:].float(),
dur_target.float(),
dur_mask,
reduce=False,
)
min_duration_mae = duration_mae.view(bsz, -1).sum(dim=-1).min(dim=0)[0]
running_stats.duration_mae += min_duration_mae
running_stats.dur_sum += (
produced_durations[:, prefix:].float() * (~dur_mask)
).sum() / args.batch_explosion_rate
running_stats.dur_sum_sq += (
produced_durations[:, prefix:].float() * (~dur_mask)
).pow(2.0).sum() / args.batch_explosion_rate
if is_discrete_duration:
duration_loss = criterion.dur_loss_fn(
torch.stack([x[1] for x in outputs], dim=1),
dur_target,
dur_mask,
reduce=False,
)
min_duration_loss = duration_loss.view(bsz, -1).sum(dim=-1).min(dim=0)[0]
running_stats.duration_nll += min_duration_loss
if f0_decoder: # can only exist for discrete F0 models
decoded_produced_f0 = f0_decoder(produced_f0[:, prefix:])
decoded_f0_target = batch["raw_f0"][:, prefix - 1 :].contiguous()
if produced_f0.ndim == 3:
decoded_produced_f0 = decoded_produced_f0.squeeze(2)
decoded_f0_target = decoded_f0_target.squeeze(2)
f0_mae = mae_loss(
decoded_produced_f0, decoded_f0_target, f0_mask, reduce=False
)
f0_mae = f0_mae.view(bsz, -1).sum(dim=-1).min(dim=0)[0]
running_stats.f0_mae += f0_mae
f0_loss = criterion.f0_loss_fn(
torch.stack([x[2] for x in outputs], dim=1),
f0_target.long(),
f0_mask,
reduce=False,
)
f0_loss = f0_loss.view(bsz, -1).sum(dim=-1).min(dim=0)[0]
running_stats.f0_nll += f0_loss
running_stats.f0_sum += (
decoded_produced_f0 * (~f0_mask)
).sum() / args.batch_explosion_rate
running_stats.f0_sum_sq += (decoded_produced_f0 * (~f0_mask)).pow(
2.0
).sum() / args.batch_explosion_rate
else:
assert not is_discrete_duration
f0_loss = mae_loss(
produced_f0[:, prefix:], f0_target, f0_mask, reduce=False
)
f0_loss = f0_loss.view(bsz, -1).sum(dim=-1).min(dim=0)[0]
running_stats.f0_mae += f0_loss
running_stats.f0_sum += (
produced_f0[:, prefix:].sum() / args.batch_explosion_rate
)
running_stats.f0_sum_sq += (
produced_f0[:, prefix:].pow(2.0).sum() / args.batch_explosion_rate
)
running_stats.n_tokens += (~dur_mask)[0, ...].sum()
token_loss = get_bleu(
produced_tokens[:, prefix:], batch["target"][0, prefix - 1 :], tgt_dict
)
running_stats.token_bleu += token_loss
running_stats.n_sentences += 1
if args.debug:
break
values = torch.tensor(
[
running_stats.token_bleu,
running_stats.duration_nll,
running_stats.duration_mae,
running_stats.f0_nll,
running_stats.f0_mae,
running_stats.f0_sum,
running_stats.f0_sum_sq,
running_stats.dur_sum,
running_stats.dur_sum_sq,
]
)
normalizers = torch.tensor(
[running_stats.n_sentences] + [running_stats.n_tokens] * 8
)
return values, normalizers
@torch.no_grad()
def correlation(args, dataset, model, criterion, tgt_dict, rank, world_size):
is_discrete_duration = dataset.discrete_dur
is_discrete_f0 = dataset.discrete_f0
f0_decoder = None
if is_discrete_f0:
assert dataset.discrete_f0
f0_decoder = Naive_F0_Decoder(
args.f0_discretization_bounds, dataset.config.f0_vq_n_units
).cuda()
if is_discrete_f0:
assert f0_decoder # correlation on tokens is meaningless
dataset = InferenceDataset(
dataset,
args.prefix_length,
filter_short=True,
presort_by_length=True,
min_length=args.min_length,
)
sampler = (
None
if world_size == 1
else DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=False
)
)
dataloader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
collate_fn=dataset.collater,
sampler=sampler,
)
Ts = args.T_token, args.T_duration, args.T_f0
decoder = TemperatureDecoder(
Ts, discrete_dur=is_discrete_duration, discrete_f0=is_discrete_f0
)
mean_dur_prefix, mean_dur_cont = [], []
mean_f0_prefix, mean_f0_cont = [], []
for batch in dataloader:
batch = explode_batch(batch, args.batch_explosion_rate)
batch = move_to_cuda(batch)
assert len(batch["prefix"]) == 1
if args.teacher_force_tokens:
autoregressive_steps = batch["target"].size(1) - args.prefix_length - 1
else:
autoregressive_steps = args.max_length - args.prefix_length # + max_shift?
if args.copy_target:
produced_durations, produced_f0 = batch["dur_target"], batch["f0_target"]
else:
_, produced_durations, produced_f0, outputs = do_sampling(
model,
batch,
tgt_dict.eos(),
decoder,
autoregressive_steps=autoregressive_steps,
teacher_force_tokens=args.teacher_force_tokens,
teacher_force_duration=args.teacher_force_duration,
teacher_force_f0=args.teacher_force_f0,
)
# first tokens actually correspond to BOS
produced_durations = produced_durations[:, 1:]
produced_f0 = produced_f0[:, 1:]
dur_target = batch["dur_target"]
if is_discrete_duration:
produced_durations = produced_durations.float()
dur_target = dur_target.float()
if is_discrete_f0:
produced_f0 = f0_decoder(produced_f0).squeeze(-1)
f0_target = batch["raw_f0"]
else:
f0_target = batch["f0_target"]
# prefix values
prefix = batch["prefix"][0]
dur_prefix_mean = dur_target[:, :prefix].sum(dim=-1) / (
(~batch["dur_mask"][:, :prefix]).sum(dim=-1)
)
non_voiced = f0_target[:, :prefix] == 0.0
f0_mask = batch["f0_mask"][:, :prefix].logical_or(non_voiced)
f0_prefix_mean = f0_target[:, :prefix].sum(dim=-1) / ((~f0_mask).sum(dim=-1))
# continuation values
dur_cont_mean = produced_durations[:, prefix:].sum(dim=-1) / (
(~batch["dur_mask"][:, prefix:]).sum(dim=-1)
)
non_voiced = produced_f0[:, prefix:] == 0.0
f0_mask = non_voiced
f0_cont_mean = produced_f0[:, prefix:].sum(dim=-1) / ((~f0_mask).sum(dim=-1))
assert not f0_cont_mean.isnan().any()
mean_dur_prefix.append(dur_prefix_mean.cpu())
mean_dur_cont.append(dur_cont_mean.cpu())
mean_f0_prefix.append(f0_prefix_mean.cpu())
mean_f0_cont.append(f0_cont_mean.cpu())
if args.debug and len(mean_dur_prefix) > 10:
break
mean_dur_prefix, mean_dur_cont = torch.cat(mean_dur_prefix), torch.cat(
mean_dur_cont
)
mean_f0_prefix, mean_f0_cont = torch.cat(mean_f0_prefix), torch.cat(mean_f0_cont)
return mean_dur_prefix, mean_dur_cont, mean_f0_prefix, mean_f0_cont
def main(rank, world_size, args):
start = time.time()
if world_size > 1:
torch.distributed.init_process_group(
backend="gloo", init_method="env://", world_size=world_size, rank=rank
)
torch.cuda.set_device(rank % torch.cuda.device_count())
raw_args = args
args = convert_namespace_to_omegaconf(args)
if args.common.seed is not None:
np.random.seed(args.common.seed)
utils.set_torch_seed(args.common.seed)
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[raw_args.path], arg_overrides={"data": args.task.data}
)
tgt_dict = task.target_dictionary
for model in models:
model.prepare_for_inference_(args)
model.cuda().eval()
if raw_args.fp16:
model = model.half()
model = models[0]
config = ExpressiveCodeDataConfig(args.task.data)
dataset = CodeDataset(
manifest=config.manifests[raw_args.eval_subset],
dictionary=task.source_dictionary,
dur_dictionary=task.source_duration_dictionary,
f0_dictionary=task.source_f0_dictionary,
config=config,
discrete_dur=task.cfg.discrete_duration,
discrete_f0=task.cfg.discrete_f0,
log_f0=task.cfg.log_f0,
normalize_f0_mean=task.cfg.normalize_f0_mean,
normalize_f0_std=task.cfg.normalize_f0_std,
interpolate_f0=task.cfg.interpolate_f0,
shifts=task.cfg.stream_shifts,
return_filename=True,
strip_filename=False,
return_continuous_f0=raw_args.dequantize_prosody,
)
if raw_args.filter_names:
dataset = FilterNamesDataset(dataset, raw_args.filter_names)
criterion = task.build_criterion(model_args.criterion)
name2metric = {
"continuation": continuation,
"teacher_force_everything": teacher_force_everything,
"correlation": correlation,
}
name2keys = {
"continuation": (
"Token BLEU3",
"Duration NLL",
"Duration MAE",
"F0 NLL",
"F0 MAE",
"F0 sum",
"F0 sum_sq",
"Dur sum",
"Dur sum_sq",
),
"teacher_force_everything": ("token_loss", "duration_loss", "f0_loss"),
"correlation": ("Duration corr", "F0 corr"),
}
metric_name = raw_args.metric
metric = name2metric[metric_name]
results = metric(raw_args, dataset, model, criterion, tgt_dict, rank, world_size)
values = None
if metric_name not in [
"correlation",
]:
values, normalizers = results
values = maybe_aggregate_normalize(values, normalizers, world_size)
elif metric_name == "correlation":
values = maybe_aggregate_correlations(results, world_size)
else:
assert False
assert values is not None
summary = dict(zip(name2keys[raw_args.metric], values.tolist()))
if metric_name == "continuation":
summary["F0 Std"] = np.sqrt(-summary["F0 sum"] ** 2 + summary["F0 sum_sq"])
summary["Dur Std"] = np.sqrt(-summary["Dur sum"] ** 2 + summary["Dur sum_sq"])
del summary["F0 sum"]
del summary["F0 sum_sq"]
del summary["Dur sum"]
del summary["Dur sum_sq"]
summary["metric"] = metric_name
if rank == 0:
print(summary)
if raw_args.wandb:
wandb_results(summary, raw_args)
print("# finished in ", time.time() - start, "seconds")
def wandb_results(summary, raw_args):
import wandb
run = wandb.init(
project=raw_args.wandb_project_name, tags=raw_args.wandb_tags.split(",")
)
run.config.metric = raw_args.metric
run.config.model = raw_args.path
run.config.data = raw_args.data
if raw_args.wandb_run_name:
run.name = raw_args.wandb_run_name
run.save()
wandb.log(summary)
wandb.finish()
def maybe_aggregate_normalize(values, normalizers, world_size):
if world_size > 1:
torch.distributed.barrier()
torch.distributed.all_reduce_multigpu([values])
torch.distributed.all_reduce_multigpu([normalizers])
return values / normalizers
def maybe_aggregate_correlations(results, world_size):
if world_size > 1:
output = [None for _ in range(world_size)]
torch.distributed.all_gather_object(output, results)
mean_dur_prefix, mean_dur_cont, mean_f0_prefix, mean_f0_cont = [
torch.cat([x[i] for x in output]) for i in range(4)
]
else:
mean_dur_prefix, mean_dur_cont, mean_f0_prefix, mean_f0_cont = results
corr_dur = scipy.stats.pearsonr(mean_dur_prefix.numpy(), mean_dur_cont.numpy())[0]
corr_f0 = scipy.stats.pearsonr(mean_f0_prefix.numpy(), mean_f0_cont.numpy())[0]
values = torch.tensor([corr_dur, corr_f0])
return values
def cli_main():
parser = options.get_interactive_generation_parser()
parser.add_argument(
"--prefix-length",
type=int,
default=1,
help="Prompt prefix length (including <s>)",
)
parser.add_argument(
"--duration-scale",
type=float,
default=1,
help="Multiply durations by the given scaler",
)
parser.add_argument(
"--debug", action="store_true", help="Process only the first batch"
)
parser.add_argument("--n_hypotheses", type=int, default=1)
parser.add_argument("--filter-names", type=str, default=None)
parser.add_argument(
"--max-length", type=int, default=200, help="Maximal produced length"
)
parser.add_argument("--teacher-force-tokens", action="store_true", default=False)
parser.add_argument("--teacher-force-duration", action="store_true", default=False)
parser.add_argument("--teacher-force-f0", action="store_true", default=False)
parser.add_argument("--copy-target", action="store_true", default=False)
parser.add_argument("--min-length", type=int, default=None)
parser.add_argument("--f0-discretization-bounds", type=str, default=None)
parser.add_argument("--dequantize-prosody", action="store_true")
parser.add_argument("--batch-explosion-rate", type=int, default=1)
parser.add_argument(
"--metric",
choices=["continuation", "teacher_force_everything", "correlation"],
required=True,
)
parser.add_argument("--wandb", action="store_true")
parser.add_argument("--wandb-project-name", type=str, default="eslm")
parser.add_argument("--wandb-tags", type=str, default="")
parser.add_argument("--wandb-run-name", type=str, default="")
parser.add_argument("--T-token", type=float, default=1.0)
parser.add_argument("--T-duration", type=float, default=1.0)
parser.add_argument("--T-f0", type=float, default=1.0)
parser.add_argument("--n-workers", type=int, default=1)
parser.add_argument(
"--eval-subset", type=str, default="valid", choices=["valid", "test"]
)
args = options.parse_args_and_arch(parser)
assert (
args.prefix_length >= 1
), "Prefix length includes bos token <s>, hence the minimum is 1."
assert args.temperature >= 0.0, "T must be non-negative!"
if args.dequantize_prosody:
assert args.f0_discretization_bounds
world_size = args.n_workers or torch.cuda.device_count()
if world_size > 1:
import random
mp.set_start_method("spawn", force=True)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(random.randint(10_000, 50_000))
mp.spawn(
main,
nprocs=world_size,
args=(
world_size,
args,
),
join=True,
)
else:
main(rank=0, world_size=world_size, args=args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/eval/cont_metrics.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/pgslm/eval/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import joblib
import numpy as np
from examples.textless_nlp.gslm.speech2unit.clustering.utils import get_audio_files
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_features
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
description="Quantize using K-means clustering over acoustic features."
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
required=True,
help="Acoustic feature type",
)
parser.add_argument(
"--kmeans_model_path",
type=str,
required=True,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--checkpoint_path",
type=str,
help="Pretrained model checkpoint",
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--out_dir_path",
required=True,
type=str,
help="File path of quantized output.",
)
parser.add_argument(
"--extension", type=str, default=".flac", help="Features file path"
)
return parser
def one_hot(feat, n_clusters):
return np.eye(n_clusters)[feat]
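# Minimal sketch (an addition): one_hot maps integer cluster ids to one-hot rows,
# e.g. with n_clusters=3 the ids [0, 2] become [[1, 0, 0], [0, 0, 1]].
def _example_one_hot():
    emb = one_hot(np.array([0, 2]), n_clusters=3)
    assert emb.shape == (2, 3) and emb[0, 0] == 1.0 and emb[1, 2] == 1.0
    return emb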
def main(args, logger):
# Feature extraction
logger.info(f"Extracting {args.feature_type} acoustic features...")
features_batch = get_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=1.0,
flatten=False,
)
logger.info(f"Features extracted for {len(features_batch)} utterances.\n")
logger.info(f"Dimensionality of representation = {features_batch[0].shape[1]}")
logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
kmeans_model.verbose = False
_, fnames, _ = get_audio_files(args.manifest_path)
os.makedirs(args.out_dir_path, exist_ok=True)
logger.info(f"Writing quantized features to {args.out_dir_path}")
for i, feats in enumerate(features_batch):
pred = kmeans_model.predict(feats)
emb = one_hot(pred, kmeans_model.n_clusters)
        base_fname = os.path.basename(fnames[i])
        # str.rstrip strips a character set rather than a suffix, so remove the
        # extension explicitly
        if base_fname.endswith(args.extension):
            base_fname = base_fname[: -len(args.extension)]
output_path = os.path.join(args.out_dir_path, f"{base_fname}.npy")
with open(output_path, "wb") as f:
np.save(f, emb)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/metrics/abx_metrics/dump_abx_feats.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import nltk
from misc.bleu_utils import sentence_bleu
import warnings
def get_target_sequences(manifest, ground_truth, to_take=1000):
import json
import pathlib
with open(ground_truth, 'r') as fin:
original_continuations = json.loads(fin.read())
sequence2length = [(k, v[0]) for k, v in original_continuations.items()]
assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds
sequence2length.sort(key=lambda x: x[1])
to_take_sequences = set(v[0] for v in sequence2length[:to_take])
to_take_ids = []
with open(manifest, 'r') as f:
f.readline()
for i, line in enumerate(f.readlines()):
seq_id = line.split()[0]
seq_id = pathlib.Path(seq_id).name.split('__')[0]
if seq_id in to_take_sequences:
to_take_ids.append(i)
print(f'Took {len(to_take_ids)} ids')
return set(to_take_ids)
def get_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--asr-transcript', type=str,
help='Path to the transcript file.')
parser.add_argument('--manifest', required=True)
parser.add_argument('--prompts-description', required=True)
parser.add_argument('--cut-id', action='store_true',
help='Whether cut the first token (typically a seq id)')
parser.add_argument('--cut-tail', action='store_true',
help='Whether cut the last token (typically a speaker id)')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
return args
def get_self_bleu(utterances, averaging_mode, weights):
self_bleu = []
for i in range(len(utterances)):
hypo = utterances[i]
rest = utterances[:i] + utterances[i+1:]
self_bleu.append(sentence_bleu(rest, hypo, weights,
no_length_penalty=True, averaging_mode=averaging_mode))
return self_bleu
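# Hedged sketch (an addition): self-BLEU treats each utterance as the hypothesis and
# the remaining utterances as references, so a set of identical utterances scores 1.
def _example_self_bleu():
    utterances = [['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'b', 'c']]
    scores = get_self_bleu(utterances, averaging_mode='arithmetic', weights=(0.5, 0.5))
    assert all(s > 0.99 for s in scores)
    return scores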
def get_self_bleu2_arithmetic(utterances):
weights = (0.5, 0.5) # equal weight for unigrams and bigrams
return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights)
def get_self_bleu2_geometric(utterances):
weights = (0.5, 0.5)
return get_self_bleu(utterances, averaging_mode='geometric', weights=weights)
def get_auto_bleu2_arithmetic(utterances):
weights = (0.5, 0.5)
return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances]
def get_auto_bleu2_geometric(utterances):
weights = (0.5, 0.5)
return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances]
def get_auto_bleu3_geometric(utterances):
weights = (1./3, 1./3, 1./3)
return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances]
def get_auto_bleu3_arithmetic(utterances):
weights = (1./3, 1./3, 1./3)
return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances]
def get_self_bleu3_arithmetic(utterances):
weights = (1./3, 1./3, 1./3)
return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights)
def get_self_bleu3_geometric(utterances):
weights = (1./3, 1./3, 1./3)
return get_self_bleu(utterances, averaging_mode='geometric', weights=weights)
def auto_bleu(sentence, weights, mean_mode='arithmetic'):
if len(sentence) <= 1:
return 0
N = len(weights)
bleu_n = np.zeros([N])
for n in range(N):
targ_ngrams = list(nltk.ngrams(sentence, n+1))
for p in range(len(targ_ngrams)):
left = sentence[:p]
right = sentence[(p+n+1):]
rest_ngrams = list(nltk.ngrams(left, n+1)) + \
list(nltk.ngrams(right, n+1))
# compute the nb of matching ngrams
bleu_n[n] += targ_ngrams[p] in rest_ngrams
bleu_n[n] /= len(targ_ngrams) # average them to get a proportion
weights = np.array(weights)
if mean_mode == 'arithmetic':
return (bleu_n * weights).sum()
elif mean_mode == 'geometric':
return (bleu_n ** weights).prod()
else:
        raise ValueError(f'Unknown aggregation mode {mean_mode}')
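# Hedged sketch (an addition): auto-BLEU measures within-sentence repetition by
# checking, for every n-gram, whether it re-occurs elsewhere in the same sentence,
# so a repetitive sentence scores higher than one without repeats.
def _example_auto_bleu():
    repetitive = ['a', 'b', 'a', 'b', 'a', 'b']
    diverse = ['a', 'b', 'c', 'd']
    weights = (0.5, 0.5)
    assert auto_bleu(repetitive, weights) > auto_bleu(diverse, weights)
    return auto_bleu(repetitive, weights)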
def main():
from multiprocessing import Pool
args = get_args()
target_ids = get_target_sequences(args.manifest, args.prompts_description)
with open(args.asr_transcript, 'r') as fin:
lines = fin.readlines()
terms = [x.strip().split() for x in lines]
filtered = []
for term in terms:
line_id = int(term[-1].split('-')[1][:-1])
if line_id in target_ids:
filtered.append(term)
terms = filtered
if args.cut_id:
terms = [x[1:] for x in terms]
if args.cut_tail:
terms = [x[:-1] for x in terms]
if args.debug:
terms = terms[:10]
tasks = [
('Self-BLEU2-arithmetic', get_self_bleu2_arithmetic),
('Self-BLEU2-geometric', get_self_bleu2_geometric),
('Auto-BLEU2-arithmetic', get_auto_bleu2_arithmetic),
('Auto-BLEU2-geometric', get_auto_bleu2_geometric),
('Self-BLEU3-arithmetic', get_self_bleu3_arithmetic),
('Self-BLEU3-geometric', get_self_bleu3_geometric),
('Auto-BLEU3-arithmetic', get_auto_bleu3_arithmetic),
('Auto-BLEU3-geometric', get_auto_bleu3_geometric),
]
n_processes = min(16, len(tasks))
with Pool(n_processes) as pool:
metrics = pool.map(run_f, [(t[1], terms) for t in tasks])
for (metric_name, _), metric in zip(tasks, metrics):
metric, sem = np.mean(metric), np.std(metric) / np.sqrt(len(metric))
metric, sem = [
round(100 * x, 2) for x in [metric, sem]
]
print(f'{metric_name} {metric} +- {sem}')
def run_f(task_params):
f, terms = task_params
return f(terms)
if __name__ == '__main__':
# NLTK produces warnings
warnings.filterwarnings("ignore")
main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import numpy as np
from misc.bleu_utils import sentence_bleu
import json
import warnings
def get_args():
import argparse
parser = argparse.ArgumentParser("Tool to calculate Continuation-BLEU2")
parser.add_argument('--asr-transcript', type=str,
help='Path to the transcript file.')
parser.add_argument('--prompts-description', type=str,
help='Path to the ground-truth continuation')
parser.add_argument('--manifest', type=str, required=True)
parser.add_argument('--take-shortest', type=int, default=1000)
args = parser.parse_args()
return args
def main():
# NLTK produces warnings
warnings.filterwarnings("ignore")
args = get_args()
with open(args.prompts_description, 'r') as fin:
original_continuations = json.loads(fin.read())
sequence2length = [(k, v[0]) for k, v in original_continuations.items()]
assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds
sequence2length.sort(key=lambda x: x[1])
to_take = set(v[0] for v in sequence2length[:args.take_shortest])
with open(args.manifest, 'r') as fin:
fin.readline()
linenum2file = dict([
(i, l.split("__")[0]) for (i, l) in enumerate(fin)
])
max_files = max(linenum2file.keys())
continuations = defaultdict(list)
mean_length_after = 0
n_examples = 0
with open(args.asr_transcript, 'r') as fin:
for line in fin:
n_examples += 1
line = line.split()
sequence_id = int(line[-1].split('-')[1][:-1])
assert sequence_id <= max_files
sequence_name = linenum2file[sequence_id]
continuations[sequence_name].append(line[:-1])
mean_length_after += len(line)
mean_length_after /= n_examples
print(f'Mean length of continuations, in words: {mean_length_after}')
metric_values = []
mean_ground_truth_words = 0
n_examples = 0
n_candidates = 0
for k, candidates in continuations.items():
if k not in to_take:
continue
n_examples += 1
ground_truth = original_continuations[k][1].split()
n_candidates += len(candidates)
bleu = sentence_bleu(candidates, ground_truth, weights=(
0.5, 0.5), no_length_penalty=True, averaging_mode="geometric")
mean_ground_truth_words += len(ground_truth)
metric_values.append(bleu)
n = len(metric_values)
print(
f'Median BLEU over {n} examples: {np.median(metric_values)} +- {np.std(metric_values) / np.sqrt(n)}')
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import warnings
def get_target_sequences(manifest, ground_truth, to_take=1000):
import json
import pathlib
with open(ground_truth, 'r') as fin:
original_continuations = json.loads(fin.read())
sequence2length = [(k, v[0]) for k, v in original_continuations.items()]
assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds
sequence2length.sort(key=lambda x: x[1])
to_take_sequences = set(v[0] for v in sequence2length[:to_take])
to_take_ids = []
with open(manifest, 'r') as f:
f.readline()
for i, line in enumerate(f.readlines()):
seq_id = line.split()[0]
seq_id = pathlib.Path(seq_id).name.split('__')[0]
if seq_id in to_take_sequences:
to_take_ids.append(i)
print(f'Took {len(to_take_ids)} ids')
return set(to_take_ids)
def get_args():
import argparse
parser = argparse.ArgumentParser("Evaluate PPX metric of a transcript.")
parser.add_argument('--asr-transcript', type=str,
help='Path to the transcript file.')
parser.add_argument('--cut-id', action='store_true',
help='Whether cut the first token (typically a seq id)')
parser.add_argument('--cut-tail', action='store_true',
help='Whether cut the last token (typically a speaker id)')
parser.add_argument('--manifest', type=str, default=None)
parser.add_argument('--prompts-description', type=str, default=None)
args = parser.parse_args()
return args
def main():
args = get_args()
lm = torch.hub.load(
'pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe')
lm.eval().cuda() # disable dropout
if args.manifest is None and args.prompts_description is None:
target_ids = None
else:
target_ids = get_target_sequences(
args.manifest, args.prompts_description)
with open(args.asr_transcript, 'r') as fin:
lines = fin.readlines()
if target_ids is not None:
filtered = []
for line in lines:
line_id = line.split()[-1]
line_id = int(line_id.split('-')[1][:-1])
if line_id in target_ids:
filtered.append(line)
lines = filtered
else:
pass
if args.cut_id:
lines = [' '.join(x.split()[1:]) for x in lines]
if args.cut_tail:
lines = [' '.join(x.split()[:-1]) for x in lines]
lines = [x.strip().lower() for x in lines]
    def get_logprob(sent):
        return lm.score(sent)['positional_scores'].mean().neg().item()
logprobs = [get_logprob(l) for l in lines]
filtered = [x for x in logprobs if not np.isnan(x)]
if len(filtered) != len(logprobs):
warnings.warn("NaNs detected!")
logprobs = filtered
perplexities = [np.exp(l) for l in logprobs]
for name, stats in [('logprob', logprobs), ('perplexity', perplexities)]:
mean = np.mean(stats)
sem = np.std(stats) / np.sqrt(len(stats))
median = np.median(stats)
interval = list(np.percentile(stats, [10, 90]))
mean, sem, median, percentile10, percentile90 = [
round(x, 2) for x in [mean, sem, median] + interval]
print(name)
print(f"\tMean {mean} +- {sem}")
print(
f"\tMedian {median}, 90% confidence interval {percentile10}...{percentile90}")
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/ppx.py |
"""
TODO: the code is take from Apache-2 Licensed NLTK: make sure we do this properly!
Copied over from nltk.tranlate.bleu_score. This code has two major changes:
- allows to turn off length/brevity penalty --- it has no sense for self-bleu,
- allows to use arithmetic instead of geometric mean
"""
import math
import sys
from fractions import Fraction
import warnings
from collections import Counter
from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction
def corpus_bleu(
list_of_references,
hypotheses,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
averaging_mode="geometric",
no_length_penalty=False
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence level BLEU scores (i.e. marco-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pairs before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below show that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
assert len(list_of_references) == len(hypotheses), (
"The number of hypotheses and their reference(s) should be the " "same "
)
# Iterate through each hypothesis and their corresponding references.
for references, hypothesis in zip(list_of_references, hypotheses):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(references, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(references, hyp_len)
# Calculate corpus-level brevity penalty.
if no_length_penalty and averaging_mode == 'geometric':
bp = 1.0
elif no_length_penalty and averaging_mode == 'arithmetic':
bp = 0.0
else:
assert not no_length_penalty
        assert averaging_mode != 'arithmetic', 'Not sure how to apply the length penalty in arithmetic mode'
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweigh:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
    # If there's no smoothing function, use method0 from the SmoothingFunction class.
if not smoothing_function:
smoothing_function = SmoothingFunction().method0
# Smoothen the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
if averaging_mode == "geometric":
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
elif averaging_mode == "arithmetic":
s = (w_i * p_i for w_i, p_i in zip(weights, p_n))
s = math.fsum(s)
return s
def sentence_bleu(
references,
hypothesis,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
averaging_mode="geometric",
no_length_penalty=False
):
return corpus_bleu(
[references], [hypothesis], weights, smoothing_function, auto_reweigh, averaging_mode, no_length_penalty
) | EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/bleu_utils.py |
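A minimal usage sketch for the BLEU wrappers above, assuming the file is importable from the fairseq root (the import path is illustrative); it exercises both the default geometric averaging and the arithmetic, no-length-penalty variant defined here.
from examples.textless_nlp.gslm.metrics.asr_metrics.misc.bleu_utils import sentence_bleu

reference = "the quick brown fox jumps over the lazy dog".split()
hypothesis = "the quick brown fox jumps over the dog".split()
# Standard BLEU: geometric mean of the n-gram precisions with a brevity penalty.
print(sentence_bleu([reference], hypothesis))
# Arithmetic mean of the precisions with the length penalty disabled.
print(sentence_bleu([reference], hypothesis,
                    averaging_mode="arithmetic", no_length_penalty=True))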
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torchaudio
import argparse
import json
import pathlib
def get_args():
parser = argparse.ArgumentParser(
"Assuring generated audio have the same length as ground-truth audio")
parser.add_argument('--samples_dir', required=True, type=str)
parser.add_argument('--out_dir', required=True, type=str)
parser.add_argument('--prompts_description', required=True, type=str)
return parser.parse_args()
def cut(src, tgt, l):
    x, sr = torchaudio.load(str(src))
    assert sr == 16_000
    x = x.squeeze()
    target_frames = int(l * sr)
    # The sample is long enough if it can be trimmed down to the target length.
    is_long_enough = target_frames <= x.size(0)
    if is_long_enough:
        x = x[:target_frames]
    torchaudio.save(str(tgt), x.unsqueeze(0), sr)
    return is_long_enough
def main():
args = get_args()
tgt_dir = pathlib.Path(args.out_dir)
tgt_dir.mkdir(exist_ok=True, parents=True)
total_files, sufficiently_long = 0, 0
with open(args.prompts_description, 'r') as f:
description = json.loads(f.read())
for src_f in pathlib.Path(args.samples_dir).glob('*.wav'):
name_prompt = src_f.with_suffix('').name.split('__')[0]
assert name_prompt in description, f'Cannot find {name_prompt}!'
target_length = description[name_prompt][0]
tgt_f = tgt_dir / (src_f.name)
is_long_enough = cut(src_f, tgt_f, target_length)
sufficiently_long += is_long_enough
if not is_long_enough:
print(f'{src_f} is not long enough')
total_files += 1
print(
f'Total files: {total_files}; sufficiently long: {sufficiently_long}')
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/misc/cut_as.py |
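For reference, a sketch of the --prompts_description JSON consumed above. The layout is inferred from the description[name_prompt][0] lookup (only the first element, the target length in seconds, is used); the prompt names and output path are placeholders, and generated files named "NAME__*.wav" are matched to description["NAME"].
import json
example = {"prompt_0001": [3.2], "prompt_0002": [5.75]}
with open("prompts.json", "w") as f:  # placeholder path
    json.dump(example, f)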
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import logging
import os
import joblib
import soundfile as sf
import torch
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_feature_reader
from examples.textless_nlp.gslm.unit2speech.tts_data import TacotronInputDataset
from examples.textless_nlp.gslm.unit2speech.utils import (
load_tacotron,
load_waveglow,
synthesize_audio,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(description="GSLM U2S tool")
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
required=True,
help="Acoustic feature type",
)
parser.add_argument(
"--acoustic_model_path",
type=str,
help="Pretrained acoustic model checkpoint",
)
parser.add_argument("--layer", type=int, help="Layer of acoustic model")
parser.add_argument(
"--kmeans_model_path",
type=str,
required=True,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--tts_model_path",
type=str,
help="TTS model file path to use for inference",
)
parser.add_argument(
"--code_dict_path",
type=str,
help="Code dict file path to use for inference",
)
parser.add_argument(
"--waveglow_path",
type=str,
help="Waveglow (vocoder) model file path to use for inference",
)
parser.add_argument("--max_decoder_steps", type=int, default=2000)
parser.add_argument("--denoiser_strength", type=float, default=0.1)
return parser
################################################
def main(args, logger):
# Acoustic Model
logger.info(f"Loading acoustic model from {args.tts_model_path}...")
feature_reader_cls = get_feature_reader(args.feature_type)
reader = feature_reader_cls(
checkpoint_path=args.acoustic_model_path, layer=args.layer
)
# K-means Model
logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
kmeans_model.verbose = False
# TTS Model
logger.info(f"Loading TTS model from {args.tts_model_path}...")
tacotron_model, sample_rate, hparams = load_tacotron(
tacotron_model_path=args.tts_model_path,
max_decoder_steps=args.max_decoder_steps,
)
# Waveglow Model
logger.info(f"Loading Waveglow model from {args.waveglow_path}...")
waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path)
# Dataset
if not os.path.exists(hparams.code_dict):
hparams.code_dict = args.code_dict_path
tts_dataset = TacotronInputDataset(hparams)
iters = 0
while True:
in_file_path = input("Input: Enter the full file path of audio file...\n")
out_file_path = input("Output: Enter the full file path of audio file...\n")
feats = reader.get_feats(in_file_path).cpu().numpy()
iters += 1
        if iters % 1000 == 0:
gc.collect()
torch.cuda.empty_cache()
quantized_units = kmeans_model.predict(feats)
quantized_units_str = " ".join(map(str, quantized_units))
tts_input = tts_dataset.get_tensor(quantized_units_str)
mel, aud, aud_dn, has_eos = synthesize_audio(
tacotron_model,
waveglow,
denoiser,
tts_input.unsqueeze(0),
strength=args.denoiser_strength,
)
sf.write(f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate)
logger.info("Resynthesis done!\n")
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/tools/resynthesize_speech.py |
EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
get_and_dump_features,
)
def get_parser():
parser = argparse.ArgumentParser(
description="Compute and dump log mel fbank features."
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
help="Acoustic feature type",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--out_features_path",
type=str,
default=None,
help="Features file path to write to",
)
parser.add_argument(
"--checkpoint_path",
type=str,
help="Pretrained acoustic model checkpoint",
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--sample_pct",
type=float,
help="Percent data to use for K-means training",
default=0.1,
)
return parser
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
if __name__ == "__main__":
"""
Example command:
python ~/speechbot/clustering/dump_logmelfank_feats.py \
    --manifest_path /checkpoint/kushall/data/LJSpeech-1.1/asr_input_wavs_16k/train.tsv \
--out_features_path /checkpoint/kushall/experiments/speechbot/logmelfbank/features/ljspeech/train.npy
"""
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
logger.info(f"Extracting {args.feature_type} acoustic features...")
get_and_dump_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=args.sample_pct,
flatten=True,
out_features_path=args.out_features_path,
)
logger.info(f"Saved extracted features at {args.out_features_path}")
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/dump_feats.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import time
import numpy as np
from sklearn.cluster import MiniBatchKMeans
import joblib
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
get_and_dump_features,
get_features,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
description="Learn K-means clustering over acoustic features."
)
# Features arguments
parser.add_argument(
"--in_features_path", type=str, default=None, help="Features file path"
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
help="Acoustic feature type",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--out_features_path",
type=str,
default=None,
help="Features file path to write to",
)
parser.add_argument(
"--checkpoint_path",
type=str,
help="Pretrained acoustic model checkpoint",
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--sample_pct",
type=float,
help="Percent data to use for K-means training",
default=0.1,
)
# K-means arguments
parser.add_argument(
"--num_clusters", type=int, help="Nubmer of clusters", default=50
)
parser.add_argument("--init", default="k-means++")
parser.add_argument(
"--max_iter",
type=int,
help="Maximum number of iterations for K-means training",
default=150,
)
parser.add_argument(
"--batch_size",
type=int,
help="Batch size for K-means training",
default=10000,
)
parser.add_argument("--tol", default=0.0, type=float)
parser.add_argument("--max_no_improvement", default=100, type=int)
parser.add_argument("--n_init", default=20, type=int)
parser.add_argument("--reassignment_ratio", default=0.5, type=float)
parser.add_argument(
"--out_kmeans_model_path",
type=str,
required=True,
help="Path to save K-means model",
)
# Leftovers
parser.add_argument(
"--seed",
type=int,
help="Random seed to use for K-means training",
default=1369,
)
return parser
def get_kmeans_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
random_state,
):
return MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
tol=tol,
max_no_improvement=max_no_improvement,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
random_state=random_state,
verbose=1,
compute_labels=True,
init_size=None,
)
def train_kmeans(kmeans_model, features_batch):
start_time = time.time()
kmeans_model.fit(features_batch)
    time_taken = round((time.time() - start_time) / 60, 2)
return kmeans_model, time_taken
def main(args, logger):
# Features loading/extraction for K-means
if args.in_features_path:
# Feature loading
logger.info(f"Loading features from {args.in_features_path}...")
features_batch = np.load(args.in_features_path, allow_pickle=True)
else:
# Feature extraction
logger.info(f"Extracting {args.feature_type} acoustic features...")
features_batch = (
get_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=args.sample_pct,
flatten=True,
)
if not args.out_features_path
else get_and_dump_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=args.sample_pct,
flatten=True,
out_features_path=args.out_features_path,
)
)
if args.out_features_path:
logger.info(
f"Saved extracted features at {args.out_features_path}"
)
logger.info(f"Features shape = {features_batch.shape}\n")
# Learn and save K-means model
kmeans_model = get_kmeans_model(
n_clusters=args.num_clusters,
init=args.init,
max_iter=args.max_iter,
batch_size=args.batch_size,
tol=args.tol,
max_no_improvement=args.max_no_improvement,
n_init=args.n_init,
reassignment_ratio=args.reassignment_ratio,
random_state=args.seed,
)
logger.info("Starting k-means training...")
kmeans_model, time_taken = train_kmeans(
kmeans_model=kmeans_model, features_batch=features_batch
)
logger.info(f"...done k-means training in {time_taken} minutes")
inertia = -kmeans_model.score(features_batch) / len(features_batch)
logger.info(f"Total intertia: {round(inertia, 2)}\n")
logger.info(f"Saving k-means model to {args.out_kmeans_model_path}")
os.makedirs(os.path.dirname(args.out_kmeans_model_path), exist_ok=True)
joblib.dump(kmeans_model, open(args.out_kmeans_model_path, "wb"))
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py |
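The saved model is a plain scikit-learn MiniBatchKMeans, so downstream code can reload it with joblib and assign a unit to every feature frame. A minimal sketch, where the path and the 768-dim random features are placeholders:
import joblib
import numpy as np
kmeans = joblib.load("km.bin")  # placeholder for --out_kmeans_model_path
feats = np.random.randn(100, 768).astype(np.float32)  # stand-in for real features
units = kmeans.predict(feats)  # one cluster id per frame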
EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple
def get_audio_files(manifest_path: str) -> Tuple[str, List[str], List[int]]:
fnames, sizes = [], []
with open(manifest_path, "r") as f:
root_dir = f.readline().strip()
for line in f:
items = line.strip().split("\t")
assert (
len(items) == 2
), f"File must have two columns separated by tab. Got {line}"
fnames.append(items[0])
sizes.append(int(items[1]))
return root_dir, fnames, sizes
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/utils.py |
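A sketch of the manifest layout get_audio_files() expects: the first line is the root directory and each following line is a tab-separated relative path and sample count. The paths and sizes below are illustrative only.
manifest = (
    "/data/LibriSpeech/dev-clean\n"
    "1272/128104/1272-128104-0000.flac\t93680\n"
    "1272/128104/1272-128104-0001.flac\t77360\n"
)
with open("dev.tsv", "w") as f:  # placeholder path
    f.write(manifest)
root_dir, fnames, sizes = get_audio_files("dev.tsv")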
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import numpy as np
import joblib
from examples.textless_nlp.gslm.speech2unit.clustering.utils import (
get_audio_files,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
get_features,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
description="Quantize using K-means clustering over acoustic features."
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
required=True,
help="Acoustic feature type",
)
parser.add_argument(
"--acoustic_model_path",
type=str,
help="Pretrained acoustic model checkpoint"
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--kmeans_model_path",
type=str,
required=True,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--features_path",
type=str,
default=None,
help="Features file path. You don't need to enter acoustic model details if you have dumped features",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--out_quantized_file_path",
required=True,
type=str,
help="File path of quantized output.",
)
parser.add_argument(
"--extension", type=str, default=".flac", help="Features file path"
)
parser.add_argument(
"--channel_id",
choices=['1', '2'],
help="The audio channel to extract the units in case of stereo file.",
default=None,
)
parser.add_argument(
"--hide-fname", action='store_true',
help="Hide file names in the output file."
)
return parser
def main(args, logger):
# Feature extraction
if args.features_path is not None:
logger.info(f"Loading acoustic features from {args.features_path}...")
features_batch = np.load(args.features_path)
else:
logger.info(f"Extracting {args.feature_type} acoustic features...")
features_batch = get_features(
feature_type=args.feature_type,
checkpoint_path=args.acoustic_model_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=1.0,
flatten=False,
channel_id=int(args.channel_id) if args.channel_id else None,
)
logger.info(
f"Features extracted for {len(features_batch)} utterances.\n"
)
logger.info(
f"Dimensionality of representation = {features_batch[0].shape[1]}"
)
# K-means model
logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
kmeans_model.verbose = False
_, fnames, _ = get_audio_files(args.manifest_path)
os.makedirs(os.path.dirname(args.out_quantized_file_path), exist_ok=True)
print(f"Writing quantized predictions to {args.out_quantized_file_path}")
with open(args.out_quantized_file_path, "w") as fout:
for i, feats in enumerate(features_batch):
pred = kmeans_model.predict(feats)
pred_str = " ".join(str(p) for p in pred)
            # Strip the extension as a suffix (str.rstrip would drop any trailing
            # characters from the set, corrupting names ending in those letters).
            suffix = '.' + args.extension.lstrip('.')
            base_fname = os.path.basename(fnames[i])
            if base_fname.endswith(suffix):
                base_fname = base_fname[:-len(suffix)]
if args.channel_id is not None:
base_fname = base_fname+f'-channel{args.channel_id}'
if not args.hide_fname:
fout.write(f"{base_fname}|{pred_str}\n")
else:
fout.write(f"{pred_str}\n")
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py |
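The file written above holds one line per utterance in the form "name|unit unit ..." (units only when --hide-fname is set). A sketch of parsing the named format back, with the path as a placeholder:
with open("quantized_units.txt") as f:  # placeholder path
    for line in f:
        name, units_str = line.rstrip().split("|")
        units = [int(u) for u in units_str.split()]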
import soundfile as sf
import torch
import torch.nn as nn
import torch.nn.functional as F
class CpcFeatureReader:
"""
Wrapper class to run inference on CPC model.
Helps extract features for a given audio file.
"""
def __init__(
self,
checkpoint_path,
layer,
use_encoder_layer=False,
norm_features=False,
sample_rate=16000,
max_chunk=64000,
use_cuda=True,
):
self.model = load_cpc_model(checkpoint_path, layer).eval()
self.sample_rate = sample_rate
self.max_chunk = max_chunk
self.norm_features = norm_features
self.use_encoder_layer = use_encoder_layer
self.use_cuda = use_cuda
if self.use_cuda:
self.model.cuda()
def read_audio(self, path, ref_len=None, channel_id=None):
wav, sr = sf.read(path)
if channel_id is not None:
assert wav.ndim == 2, \
f"Expected stereo input when channel_id is given ({path})"
assert channel_id in [1, 2], \
"channel_id is expected to be in [1, 2]"
wav = wav[:, channel_id-1]
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
assert sr == self.sample_rate, sr
if ref_len is not None and abs(ref_len - len(wav)) > 160:
print(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, file_path, ref_len=None, channel_id=None):
x = self.read_audio(file_path, ref_len, channel_id)
# Inspired from CPC_audio feature_loader.py
with torch.no_grad():
x = torch.from_numpy(x).float()
if self.use_cuda:
x = x.cuda()
x = x.view(1, 1, -1)
size = x.size(2)
feat = []
start = 0
while start < size:
if start + self.max_chunk > size:
break
x_chunk = x[..., start : start + self.max_chunk]
feat_chunk = self.model.extract_features(
source=x_chunk,
get_encoded=self.use_encoder_layer,
norm_output=self.norm_features,
)
feat.append(feat_chunk)
start += self.max_chunk
if start < size:
                x_chunk = x[..., -self.max_chunk :]
feat_chunk = self.model.extract_features(
source=x_chunk,
get_encoded=self.use_encoder_layer,
norm_output=self.norm_features,
)
df = x_chunk.size(2) // feat_chunk.size(1)
delta = (size - start) // df
feat.append(feat_chunk[:, -delta:])
return torch.cat(feat, 1).squeeze(0)
def load_cpc_model(checkpoint_path, layer=None):
state_dict = torch.load(checkpoint_path)
weights = state_dict["weights"]
config = state_dict["config"]
if layer is not None:
config["nLevelsGRU"] = layer
encoder = CPCEncoder(config["hiddenEncoder"])
ar_net = CPCAR(
config["hiddenEncoder"], config["hiddenGar"], False, config["nLevelsGRU"]
)
model = CPCModel(encoder, ar_net)
model.load_state_dict(weights, strict=False)
model.config = config
return model
class ChannelNorm(nn.Module):
def __init__(self, num_features, epsilon=1e-05, affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1, num_features, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, num_features, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
cum_mean = x.mean(dim=1, keepdim=True)
cum_var = x.var(dim=1, keepdim=True)
x = (x - cum_mean) * torch.rsqrt(cum_var + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
class CPCEncoder(nn.Module):
def __init__(self, hidden_dim=512):
super(CPCEncoder, self).__init__()
self.conv0 = nn.Conv1d(1, hidden_dim, 10, stride=5, padding=3)
self.batchNorm0 = ChannelNorm(hidden_dim)
self.conv1 = nn.Conv1d(hidden_dim, hidden_dim, 8, stride=4, padding=2)
self.batchNorm1 = ChannelNorm(hidden_dim)
self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
self.batchNorm2 = ChannelNorm(hidden_dim)
self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
self.batchNorm3 = ChannelNorm(hidden_dim)
self.conv4 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
self.batchNorm4 = ChannelNorm(hidden_dim)
self.DOWNSAMPLING = 160
def get_output_dim(self):
return self.conv4.out_channels
def forward(self, x):
x = F.relu(self.batchNorm0(self.conv0(x)))
x = F.relu(self.batchNorm1(self.conv1(x)))
x = F.relu(self.batchNorm2(self.conv2(x)))
x = F.relu(self.batchNorm3(self.conv3(x)))
x = F.relu(self.batchNorm4(self.conv4(x)))
return x
class CPCAR(nn.Module):
def __init__(self, dim_encoded, dim_output, keep_hidden, num_layers):
super(CPCAR, self).__init__()
self.baseNet = nn.LSTM(
dim_encoded, dim_output, num_layers=num_layers, batch_first=True
)
self.hidden = None
self.keep_hidden = keep_hidden
def get_output_dim(self):
return self.baseNet.hidden_size
def forward(self, x):
try:
self.baseNet.flatten_parameters()
except RuntimeError:
pass
x, h = self.baseNet(x, self.hidden)
if self.keep_hidden:
if isinstance(h, tuple):
self.hidden = tuple(x.detach() for x in h)
else:
self.hidden = h.detach()
return x
class CPCModel(nn.Module):
def __init__(self, encoder, ar_net):
super(CPCModel, self).__init__()
self.gEncoder = encoder
self.gAR = ar_net
self.config = None
def forward(self, x, label):
encoded = self.gEncoder(x).permute(0, 2, 1)
cpc_feature = self.gAR(encoded)
return cpc_feature, encoded, label
def extract_features(self, source, get_encoded=False, norm_output=False):
cpc_feature, encoded, _ = self.forward(source, None)
if get_encoded:
cpc_feature = encoded
if norm_output:
mean = cpc_feature.mean(dim=1, keepdim=True)
var = cpc_feature.var(dim=1, keepdim=True)
cpc_feature = (cpc_feature - mean) / torch.sqrt(var + 1e-08)
return cpc_feature
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import fairseq
import soundfile as sf
import torch.nn.functional as F
class HubertFeatureReader:
"""
Wrapper class to run inference on HuBERT model.
Helps extract features for a given audio file.
"""
def __init__(self, checkpoint_path, layer, max_chunk=1600000, use_cuda=True):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path]
)
self.model = model[0].eval()
self.task = task
self.layer = layer
self.max_chunk = max_chunk
self.use_cuda = use_cuda
if self.use_cuda:
self.model.cuda()
def read_audio(self, path, ref_len=None, channel_id=None):
wav, sr = sf.read(path)
if channel_id is not None:
assert wav.ndim == 2, \
f"Expected stereo input when channel_id is given ({path})"
assert channel_id in [1, 2], \
"channel_id is expected to be in [1, 2]"
wav = wav[:, channel_id-1]
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
assert sr == self.task.cfg.sample_rate, sr
if ref_len is not None and abs(ref_len - len(wav)) > 160:
print(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, file_path, ref_len=None, channel_id=None):
x = self.read_audio(file_path, ref_len, channel_id)
with torch.no_grad():
x = torch.from_numpy(x).float()
if self.use_cuda:
x = x.cuda()
if self.task.cfg.normalize:
x = F.layer_norm(x, x.shape)
x = x.view(1, -1)
feat = []
for start in range(0, x.size(1), self.max_chunk):
x_chunk = x[:, start: start + self.max_chunk]
feat_chunk, _ = self.model.extract_features(
source=x_chunk,
padding_mask=None,
mask=False,
output_layer=self.layer,
)
feat.append(feat_chunk)
return torch.cat(feat, 1).squeeze(0)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/hubert_feature_reader.py |
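A minimal usage sketch of the reader above; the checkpoint name, layer, and the 768-dim output noted in the comment assume a HuBERT Base checkpoint and are placeholders. Pass use_cuda=False to run on CPU.
reader = HubertFeatureReader(
    checkpoint_path="hubert_base_ls960.pt", layer=6, use_cuda=False  # placeholders
)
feats = reader.get_feats("sample_16k.wav")  # (num_frames, 768) at a 50 Hz frame rate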
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import os
import random
import shutil
import numpy as np
import torch
import tqdm
from examples.textless_nlp.gslm.speech2unit.pretrained.cpc_feature_reader import (
CpcFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import (
HubertFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.logmel_feature_reader import (
LogMelFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.w2v2_feature_reader import (
Wav2VecFeatureReader,
)
def get_feature_reader(feature_type):
if feature_type == "logmel":
return LogMelFeatureReader
elif feature_type == "hubert":
return HubertFeatureReader
elif feature_type == "w2v2":
return Wav2VecFeatureReader
elif feature_type == "cpc":
return CpcFeatureReader
else:
raise NotImplementedError(f"{feature_type} is not supported.")
def get_feature_iterator(
    feature_type, checkpoint_path, layer, manifest_path, sample_pct, channel_id=None
):
feature_reader_cls = get_feature_reader(feature_type)
with open(manifest_path, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
file_path_list = [
os.path.join(root, line.split("\t")[0])
for line in lines
if len(line) > 0
]
if sample_pct < 1.0:
file_path_list = random.sample(
file_path_list, int(sample_pct * len(file_path_list))
)
num_files = len(file_path_list)
reader = feature_reader_cls(
checkpoint_path=checkpoint_path, layer=layer
)
def iterate():
for file_path in file_path_list:
feats = reader.get_feats(file_path, channel_id=channel_id)
yield feats.cpu().numpy()
return iterate, num_files
def get_features(
    feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten, channel_id=None
):
generator, num_files = get_feature_iterator(
feature_type=feature_type,
checkpoint_path=checkpoint_path,
layer=layer,
manifest_path=manifest_path,
sample_pct=sample_pct,
channel_id=channel_id
)
iterator = generator()
features_list = []
for features in tqdm.tqdm(iterator, total=num_files):
features_list.append(features)
# Explicit clean up
del iterator
del generator
gc.collect()
torch.cuda.empty_cache()
if flatten:
return np.concatenate(features_list)
return features_list
def get_and_dump_features(
feature_type,
checkpoint_path,
layer,
manifest_path,
sample_pct,
flatten,
out_features_path,
):
# Feature extraction
features_batch = get_features(
feature_type=feature_type,
checkpoint_path=checkpoint_path,
layer=layer,
manifest_path=manifest_path,
sample_pct=sample_pct,
flatten=flatten,
)
# Save features
out_dir_path = os.path.dirname(out_features_path)
os.makedirs(out_dir_path, exist_ok=True)
shutil.copyfile(
manifest_path,
os.path.join(out_dir_path, os.path.basename(manifest_path)),
)
np.save(out_features_path, features_batch)
return features_batch
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py |
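A sketch of calling the extraction helpers above directly from Python instead of through the CLI scripts; the checkpoint and manifest paths are placeholders and the layer/sample_pct values are illustrative.
feats = get_features(
    feature_type="hubert",
    checkpoint_path="hubert_base_ls960.pt",  # placeholder
    layer=6,
    manifest_path="train.tsv",  # placeholder
    sample_pct=0.1,
    flatten=True,
)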
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import fairseq
import soundfile as sf
class Wav2VecFeatureReader:
"""
Wrapper class to run inference on Wav2Vec 2.0 model.
Helps extract features for a given audio file.
"""
def __init__(self, checkpoint_path, layer, use_cuda=True):
state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(
checkpoint_path
)
w2v_args = state["args"]
self.task = fairseq.tasks.setup_task(w2v_args)
model = self.task.build_model(w2v_args)
model.load_state_dict(state["model"], strict=True)
model.eval()
self.model = model
self.layer = layer
self.use_cuda = use_cuda
if self.use_cuda:
self.model.cuda()
def read_audio(self, fname, channel_id=None):
wav, sr = sf.read(fname)
if channel_id is not None:
assert wav.ndim == 2, \
f"Expected stereo input when channel_id is given ({fname})"
assert channel_id in [1, 2], \
"channel_id is expected to be in [1, 2]"
wav = wav[:, channel_id-1]
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
assert sr == self.task.cfg.sample_rate, sr
return wav
def get_feats(self, file_path, channel_id=None):
x = self.read_audio(file_path, channel_id)
with torch.no_grad():
source = torch.from_numpy(x).view(1, -1).float()
if self.use_cuda:
source = source.cuda()
res = self.model(
source=source, mask=False, features_only=True, layer=self.layer
)
return res["layer_results"][self.layer][0].squeeze(1)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/w2v2_feature_reader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import soundfile as sf
import torch
import torchaudio.compliance.kaldi as kaldi
class LogMelFeatureReader:
"""
    Wrapper class to extract log-Mel filterbank (fbank) features.
Helps extract features for a given audio file.
"""
def __init__(self, *args, **kwargs):
self.num_mel_bins = kwargs.get("num_mel_bins", 80)
self.frame_length = kwargs.get("frame_length", 25.0)
def get_feats(self, file_path, channel_id=None):
wav, sr = sf.read(file_path)
if channel_id is not None:
assert wav.ndim == 2, \
f"Expected stereo input when channel_id is given ({file_path})"
wav = wav[:, channel_id-1]
feats = torch.from_numpy(wav).float()
feats = kaldi.fbank(
feats.unsqueeze(0),
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
sample_frequency=sr,
)
return feats
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/logmel_feature_reader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import soundfile as sf
from examples.textless_nlp.gslm.unit2speech.tts_data import (
TacotronInputDataset,
)
from examples.textless_nlp.gslm.unit2speech.utils import (
load_quantized_audio_from_file,
load_tacotron,
load_waveglow,
synthesize_audio,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
description="Wav2Vec 2.0 speech generator."
)
parser.add_argument(
"--quantized_unit_path",
type=str,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--tts_model_path",
type=str,
help="TTS model file path to use for inference",
)
parser.add_argument(
"--waveglow_path",
type=str,
help="Path to the waveglow checkpoint (vocoder).",
)
parser.add_argument(
"--code_dict_path",
type=str,
help="Code dict file path to use for inference",
)
parser.add_argument("--max_decoder_steps", type=int, default=2000)
parser.add_argument("--denoiser_strength", type=float, default=0.1)
parser.add_argument(
"--out_audio_dir",
type=str,
help="Output directory to dump audio files",
)
return parser
def main(args, logger):
# Load quantized audio
logger.info(f"Loading quantized audio from {args.quantized_unit_path}...")
names_batch, quantized_units_batch = load_quantized_audio_from_file(
file_path=args.quantized_unit_path
)
logger.info(f"Loading TTS model from {args.tts_model_path}...")
tacotron_model, sample_rate, hparams = load_tacotron(
tacotron_model_path=args.tts_model_path,
max_decoder_steps=args.max_decoder_steps,
)
logger.info(f"Loading Waveglow model from {args.waveglow_path}...")
waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path)
if not os.path.exists(hparams.code_dict):
hparams.code_dict = args.code_dict_path
tts_dataset = TacotronInputDataset(hparams)
for name, quantized_units in zip(names_batch, quantized_units_batch):
quantized_units_str = " ".join(map(str, quantized_units))
tts_input = tts_dataset.get_tensor(quantized_units_str)
mel, aud, aud_dn, has_eos = synthesize_audio(
tacotron_model,
waveglow,
denoiser,
tts_input.unsqueeze(0),
strength=args.denoiser_strength,
)
out_file_path = os.path.join(args.out_audio_dir, f"{name}.wav")
sf.write(
f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate
)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/synthesize_audio_from_units.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from examples.textless_nlp.gslm.unit2speech.tacotron2.text import (
EOS_TOK,
SOS_TOK,
code_to_sequence,
text_to_sequence,
)
from examples.textless_nlp.gslm.unit2speech.tacotron2.utils import (
load_code_dict,
)
class TacotronInputDataset:
def __init__(self, hparams, append_str=""):
self.is_text = getattr(hparams, "text_or_code", "text") == "text"
if not self.is_text:
self.code_dict = load_code_dict(
hparams.code_dict, hparams.add_sos, hparams.add_eos
)
self.code_key = hparams.code_key
self.add_sos = hparams.add_sos
self.add_eos = hparams.add_eos
self.collapse_code = hparams.collapse_code
self.append_str = append_str
def process_code(self, inp_str):
inp_toks = inp_str.split()
if self.add_sos:
inp_toks = [SOS_TOK] + inp_toks
if self.add_eos:
inp_toks = inp_toks + [EOS_TOK]
return code_to_sequence(inp_toks, self.code_dict, self.collapse_code)
def process_text(self, inp_str):
return text_to_sequence(inp_str, ["english_cleaners"])
def get_tensor(self, inp_str):
# uid, txt, inp_str = self._get_data(idx)
inp_str = inp_str + self.append_str
if self.is_text:
inp_toks = self.process_text(inp_str)
else:
inp_toks = self.process_code(inp_str)
return torch.from_numpy(np.array(inp_toks)).long()
def __len__(self):
return len(self.data)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tts_data.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import copy
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a+input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output):
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total
return loss/(z.size(0)*z.size(1)*z.size(2))
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If reverse=True it does convolution with
inverse
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
_qr = torch.linalg.qr if torch.__version__ >= "1.8" else torch.qr
W = _qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:,0] = -1*W[:,0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W)
z = self.conv(z)
return z, log_det_W
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary difference
from WaveNet is the convolutions need not be causal. There is also no dilation
size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size*dilation - dilation)/2)
in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2*n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
output = torch.zeros_like(audio)
n_channels_tensor = torch.IntTensor([self.n_channels])
spect = self.cond_layer(spect)
for i in range(self.n_layers):
spect_offset = i*2*self.n_channels
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
spect[:,spect_offset:spect_offset+2*self.n_channels,:],
n_channels_tensor)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = audio + res_skip_acts[:,:self.n_channels,:]
output = output + res_skip_acts[:,self.n_channels:,:]
else:
output = output + res_skip_acts
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group/2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size/2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels # Useful during inference
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:,:self.n_early_size,:])
audio = audio[:,self.n_early_size:,:]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s)*audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1],1)
output_audio.append(audio)
return torch.cat(output_audio,1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
if spect.type() == 'torch.cuda.HalfTensor':
audio = torch.cuda.HalfTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
else:
audio = torch.cuda.FloatTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
audio = torch.autograd.Variable(sigma*audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b)/torch.exp(s)
audio = torch.cat([audio_0, audio_1],1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
if spect.type() == 'torch.cuda.HalfTensor':
z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
audio = torch.cat((sigma*z, audio),1)
audio = audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/glow.py |
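A sketch of vocoding a mel-spectrogram with a pretrained WaveGlow checkpoint; the checkpoint path is a placeholder, the mel tensor is random stand-in data, and a CUDA device is required because infer() allocates torch.cuda tensors. Loading mirrors load_waveglow() further below; remove_weightnorm() is an optional inference-time optimization.
import torch
waveglow = torch.load("waveglow_256channels.pt")["model"]  # placeholder checkpoint
waveglow = WaveGlow.remove_weightnorm(waveglow).cuda().eval()
mel = torch.randn(1, 80, 200).cuda()  # stand-in for a real (batch, n_mel, frames) input
with torch.no_grad():
    audio = waveglow.infer(mel, sigma=0.666)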
import os
import shlex
import subprocess
import progressbar
from time import time
from pathlib import Path
def find_all_files(path_dir, extension):
out = []
for root, dirs, filenames in os.walk(path_dir):
for f in filenames:
if f.endswith(extension):
out.append(((str(Path(f).stem)), os.path.join(root, f)))
return out
def convert16k(inputfile, outputfile16k):
command = ('sox -c 1 -b 16 {} -t wav {} rate 16k'.format(inputfile, outputfile16k))
subprocess.call(shlex.split(command))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Convert to wav 16k audio using sox.')
parser.add_argument('input_dir', type=str,
help='Path to the input dir.')
parser.add_argument('output_dir', type=str,
help='Path to the output dir.')
parser.add_argument('--extension', type=str, default='wav',
                          help='Audio file extension in the input. Default: wav')
args = parser.parse_args()
# Find all sequences
print(f"Finding all audio files with extension '{args.extension}' from {args.input_dir}...")
audio_files = find_all_files(args.input_dir, args.extension)
print(f"Done! Found {len(audio_files)} files.")
# Convert to relative path
audio_files = [os.path.relpath(file[-1], start=args.input_dir) for file in audio_files]
# Create all the directories needed
rel_dirs_set = set([os.path.dirname(file) for file in audio_files])
for rel_dir in rel_dirs_set:
Path(os.path.join(args.output_dir, rel_dir)).mkdir(parents=True, exist_ok=True)
# Converting wavs files
print("Converting the audio to wav files...")
bar = progressbar.ProgressBar(maxval=len(audio_files))
bar.start()
start_time = time()
for index, file in enumerate(audio_files):
bar.update(index)
input_file = os.path.join(args.input_dir, file)
output_file = os.path.join(args.output_dir, os.path.splitext(file)[0]+".wav")
convert16k(input_file, output_file)
bar.finish()
print(f"...done {len(audio_files)} files in {time()-start_time} seconds.") | EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/convert_to_16k.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from examples.textless_nlp.gslm.unit2speech.tacotron2.model import Tacotron2
from examples.textless_nlp.gslm.unit2speech.tacotron2.waveglow_denoiser import (
Denoiser,
)
def load_quantized_audio_from_file(file_path):
base_fname_batch, quantized_units_batch = [], []
with open(file_path) as f:
for line in f:
base_fname, quantized_units_str = line.rstrip().split("|")
quantized_units = [int(q) for q in quantized_units_str.split(" ")]
base_fname_batch.append(base_fname)
quantized_units_batch.append(quantized_units)
return base_fname_batch, quantized_units_batch
def synthesize_audio(model, waveglow, denoiser, inp, lab=None, strength=0.0):
assert inp.size(0) == 1
inp = inp.cuda()
if lab is not None:
lab = torch.LongTensor(1).cuda().fill_(lab)
with torch.no_grad():
_, mel, _, ali, has_eos = model.inference(inp, lab, ret_has_eos=True)
aud = waveglow.infer(mel, sigma=0.666)
aud_dn = denoiser(aud, strength=strength).squeeze(1)
return mel, aud, aud_dn, has_eos
def load_tacotron(tacotron_model_path, max_decoder_steps):
ckpt_dict = torch.load(tacotron_model_path)
hparams = ckpt_dict["hparams"]
hparams.max_decoder_steps = max_decoder_steps
sr = hparams.sampling_rate
model = Tacotron2(hparams)
model.load_state_dict(ckpt_dict["model_dict"])
model = model.cuda().eval().half()
return model, sr, hparams
def load_waveglow(waveglow_path):
waveglow = torch.load(waveglow_path)["model"]
waveglow = waveglow.cuda().eval().half()
for k in waveglow.convinv:
k.float()
denoiser = Denoiser(waveglow)
return waveglow, denoiser
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py |
import os
import time
import torch
import sys
import subprocess
argslist = list(sys.argv)[1:]
log_dir = argslist[-1]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
print("GPU log directory is {}".format(log_dir))
os.makedirs(log_dir, exist_ok=True)
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("{}/{}_GPU_{}.log".format(log_dir, job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
for p in workers:
p.wait()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/multiproc.py |
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
            parts = line.split('  ')  # word and pronunciation are separated by two spaces
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/cmudict.py |
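A minimal usage sketch; the dictionary path is a placeholder pointing at a file in the standard cmudict format (e.g. a line such as "HELLO  HH AH0 L OW1").
cmu = CMUDict("cmudict-0.7b")  # placeholder path
print(len(cmu))                # number of entries kept
print(cmu.lookup("hello"))     # e.g. ['HH AH0 L OW1'], or None if the word is absent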
# import sys
# sys.path.append('tacotron2')
import torch
from .layers import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).cuda()
if mode == 'zeros':
mel_input = torch.zeros(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
elif mode == 'normal':
mel_input = torch.randn(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py |
EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/__init__.py |
|
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/audio_processing.py |
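A quick sanity check of the compression/decompression pair defined above, which maps mel spectrograms to and from the log domain.
import torch
mel = torch.rand(80, 100) + 1e-3  # strictly above the clip value
compressed = dynamic_range_compression(mel)
restored = dynamic_range_decompression(compressed)
assert torch.allclose(mel, restored, atol=1e-5)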
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
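# Illustrative sketch (not part of the original module): a few hypothetical inputs
# showing how currency, ordinals, and comma-grouped numbers are expanded.
def _demo_normalize_numbers():
    assert normalize_numbers('$1.50') == 'one dollar, fifty cents'
    assert normalize_numbers('2nd') == 'second'
    assert normalize_numbers('1,000') == 'one thousand'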
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py |
from math import sqrt
import torch
import torch.distributions as distr
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from .layers import ConvNorm, LinearNorm, GlobalAvgPool
from .utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
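# Illustrative sketch (not part of the original module): location-sensitive attention
# over a toy memory, with hypothetical sizes (batch 2, 7 encoder steps, query dim 16).
def _demo_attention():
    attn = Attention(attention_rnn_dim=16, embedding_dim=8, attention_dim=12,
                     attention_location_n_filters=4, attention_location_kernel_size=3)
    memory = torch.randn(2, 7, 8)
    processed_memory = attn.memory_layer(memory)
    attention_weights_cat = torch.zeros(2, 2, 7)
    query = torch.randn(2, 16)
    context, weights = attn(query, memory, processed_memory, attention_weights_cat, mask=None)
    assert context.shape == (2, 8) and weights.shape == (2, 7)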
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
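# Illustrative sketch (not part of the original module): the prenet applies dropout
# with training=True even in eval mode, following the Tacotron 2 recipe.
def _demo_prenet():
    prenet = Prenet(in_dim=80, sizes=[256, 256])
    assert prenet(torch.randn(4, 80)).shape == (4, 256)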
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolution with 512 channels and kernel size 5
"""
def __init__(self, hparams):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
for i in range(1, hparams.postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim,
hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(hparams.n_mel_channels))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)
x = F.dropout(self.convolutions[-1](x), 0.5, self.training)
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, hparams):
super(Encoder, self).__init__()
convolutions = []
for _ in range(hparams.encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(hparams.encoder_embedding_dim,
hparams.encoder_embedding_dim,
kernel_size=hparams.encoder_kernel_size, stride=1,
padding=int((hparams.encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(hparams.encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
int(hparams.encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
        # PyTorch tensors are not reversible, hence the conversion to numpy
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def inference(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class AudioEncoder(nn.Module):
def __init__(self, hparams):
super(AudioEncoder, self).__init__()
assert hparams.lat_dim > 0
convolutions = []
inp_dim = hparams.n_mel_channels
for _ in range(hparams.lat_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(inp_dim, hparams.lat_n_filters,
kernel_size=hparams.lat_kernel_size, stride=1,
padding=int((hparams.lat_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.lat_n_filters))
inp_dim = hparams.lat_n_filters
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.lat_n_filters,
int(hparams.lat_n_filters / 2),
hparams.lat_n_blstms, batch_first=True,
bidirectional=True)
self.pool = GlobalAvgPool()
self.mu_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim)
self.logvar_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim)
self.lat_dim = hparams.lat_dim
def forward(self, x, lengths):
"""
Args:
x (torch.Tensor): (B, F, T)
"""
for conv in self.convolutions:
            x = F.dropout(torch.tanh(conv(x)), 0.5, self.training)
x = x.transpose(1, 2) # (B, T, D)
# x may not be sorted by length. Sort->process->unsort
max_len = x.size(1)
assert max_len == torch.max(lengths).item()
lengths, perm_idx = lengths.sort(0, descending=True)
x = x[perm_idx]
x = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
_, unperm_idx = perm_idx.sort(0)
outputs = outputs[unperm_idx] # (B, T, D)
        lengths = lengths[unperm_idx]  # (B,)
outputs = self.pool(outputs, lengths) # (B, D)
mu = self.mu_proj(outputs)
logvar = self.logvar_proj(outputs)
z = distr.Normal(mu, logvar).rsample()
return z, mu, logvar
class Decoder(nn.Module):
def __init__(self, hparams):
super(Decoder, self).__init__()
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.encoder_embedding_dim = hparams.encoder_embedding_dim
self.obs_dim = hparams.obs_dim
self.lat_dim = hparams.lat_dim
self.attention_rnn_dim = hparams.attention_rnn_dim
self.decoder_rnn_dim = hparams.decoder_rnn_dim
self.prenet_dim = hparams.prenet_dim
self.max_decoder_steps = hparams.max_decoder_steps
self.gate_threshold = hparams.gate_threshold
self.p_attention_dropout = hparams.p_attention_dropout
self.p_decoder_dropout = hparams.p_decoder_dropout
self.prenet = Prenet(
hparams.n_mel_channels * hparams.n_frames_per_step,
[hparams.prenet_dim, hparams.prenet_dim])
self.attention_rnn = nn.LSTMCell(
hparams.prenet_dim + hparams.encoder_embedding_dim,
hparams.attention_rnn_dim)
self.attention_layer = Attention(
hparams.attention_rnn_dim, hparams.encoder_embedding_dim,
hparams.attention_dim, hparams.attention_location_n_filters,
hparams.attention_location_kernel_size)
encoder_tot_dim = (hparams.encoder_embedding_dim + \
hparams.lat_dim + hparams.obs_dim)
self.decoder_rnn = nn.LSTMCell(
hparams.attention_rnn_dim + encoder_tot_dim,
hparams.decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
hparams.decoder_rnn_dim + encoder_tot_dim,
hparams.n_mel_channels * hparams.n_frames_per_step)
self.gate_layer = LinearNorm(
hparams.decoder_rnn_dim + encoder_tot_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
        memory: encoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def initialize_decoder_states(self, memory, obs_and_lat, mask):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
obs_and_lat: Observed and latent attribute embeddings
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.obs_and_lat = obs_and_lat
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
        gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
self.attention_hidden = F.dropout(
self.attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)), dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory,
attention_weights_cat, self.mask)
self.attention_weights_cum += self.attention_weights
decoder_input = torch.cat(
(self.attention_hidden, self.attention_context), -1)
if self.obs_and_lat is not None:
decoder_input = torch.cat((decoder_input, self.obs_and_lat), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell))
self.decoder_hidden = F.dropout(
self.decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(self.decoder_hidden, self.attention_context), dim=1)
if self.obs_and_lat is not None:
decoder_hidden_attention_context = torch.cat(
(decoder_hidden_attention_context, self.obs_and_lat), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def forward(self, memory, obs_and_lat, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
obs_and_lat: Observed and latent attribute embeddings
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
self.initialize_decoder_states(
memory, obs_and_lat, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
mel_output, gate_output, attention_weights = self.decode(
decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output.squeeze()]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def inference(self, memory, obs_and_lat, ret_has_eos=False):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
obs_and_lat: Observed and latent attribute embeddings
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, obs_and_lat, mask=None)
mel_outputs, gate_outputs, alignments = [], [], []
has_eos = False
while True:
decoder_input = self.prenet(decoder_input)
mel_output, gate_output, alignment = self.decode(decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [alignment]
if torch.sigmoid(gate_output.data) > self.gate_threshold:
has_eos = True
break
elif len(mel_outputs) == self.max_decoder_steps:
# print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
if ret_has_eos:
return mel_outputs, gate_outputs, alignments, has_eos
else:
return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
def __init__(self, hparams):
super(Tacotron2, self).__init__()
self.mask_padding = hparams.mask_padding
self.fp16_run = hparams.fp16_run
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
# initialize text encoder embedding
self.embedding = nn.Embedding(
hparams.n_symbols, hparams.symbols_embedding_dim)
std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.embedding.weight.data.uniform_(-val, val)
# initialize observed attribute embedding
self.obs_embedding = None
if hparams.obs_dim > 0:
self.obs_embedding = nn.Embedding(
hparams.obs_n_class, hparams.obs_dim)
std = sqrt(2.0 / (hparams.obs_n_class + hparams.obs_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.obs_embedding.weight.data.uniform_(-val, val)
self.encoder = Encoder(hparams)
self.decoder = Decoder(hparams)
self.postnet = Postnet(hparams)
self.lat_encoder = None
if hparams.lat_dim > 0:
self.lat_encoder = AudioEncoder(hparams)
def parse_batch(self, batch):
(text_padded, input_lengths, obs_labels,
mel_padded, gate_padded, output_lengths) = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
obs_labels = to_gpu(obs_labels).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
return (
(text_padded, input_lengths, obs_labels,
mel_padded, max_len, output_lengths),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths=None):
if self.mask_padding and output_lengths is not None:
mask = ~get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].data.masked_fill_(mask, 0.0)
outputs[1].data.masked_fill_(mask, 0.0)
outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
(text_inputs, text_lengths, obs_labels,
mels, max_len, output_lengths) = inputs
text_lengths, output_lengths = text_lengths.data, output_lengths.data
embedded_inputs = self.embedding(text_inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, text_lengths)
obs = None
if self.obs_embedding is not None:
obs = self.obs_embedding(obs_labels)
lat, lat_mu, lat_logvar = None, None, None
if self.lat_encoder is not None:
(lat, lat_mu, lat_logvar) = self.lat_encoder(mels, output_lengths)
obs_and_lat = [x for x in [obs, lat] if x is not None]
if bool(obs_and_lat):
obs_and_lat = torch.cat(obs_and_lat, dim=-1)
else:
obs_and_lat = None
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, obs_and_lat, mels, memory_lengths=text_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments,
lat_mu, lat_logvar],
output_lengths)
def inference(self, inputs, obs_labels=None, lat=None, ret_has_eos=False):
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder.inference(embedded_inputs)
if obs_labels is None:
obs_labels = torch.LongTensor(len(inputs))
obs_labels = obs_labels.to(inputs.device).zero_()
obs = None
if self.obs_embedding is not None:
obs = self.obs_embedding(obs_labels)
if self.lat_encoder is not None:
if lat is None:
lat = torch.FloatTensor(len(inputs), self.lat_encoder.lat_dim)
lat = lat.to(inputs.device).zero_().type(encoder_outputs.type())
obs_and_lat = [x for x in [obs, lat] if x is not None]
if bool(obs_and_lat):
obs_and_lat = torch.cat(obs_and_lat, dim=-1)
else:
obs_and_lat = None
mel_outputs, gate_outputs, alignments, has_eos = self.decoder.inference(
encoder_outputs, obs_and_lat, ret_has_eos=True)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
outputs = self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
if ret_has_eos:
return outputs + [has_eos]
else:
return outputs
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/model.py |
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from .audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
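# Illustrative sketch (not part of the original module): analysis followed by synthesis
# preserves the signal length; note that inverse() keeps an extra channel dimension.
def _demo_stft_analysis_synthesis():
    stft = STFT(filter_length=800, hop_length=200, win_length=800)
    audio = torch.randn(1, 4000)
    recon = stft(audio)  # (1, 1, 4000)
    return audio.shape, recon.shape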
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/stft.py |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been
run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
'''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
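# Illustrative sketch (not part of the original module): padding sits at index 0 and
# every ARPAbet entry keeps its disambiguating "@" prefix.
def _demo_symbols():
    assert symbols[0] == _pad
    assert all(s.startswith('@') for s in _arpabet)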
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import io
import json
import librosa
import numpy as np
import soundfile as sf
import time
import torch
from scipy.io.wavfile import read
from .text import SOS_TOK, EOS_TOK
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1))
return mask
def load_wav_to_torch(full_path, sr=None):
data, sr = librosa.load(full_path, sr=sr)
data = np.clip(data, -1, 1) # potentially out of [-1, 1] due to resampling
data = data * 32768.0 # match values loaded by scipy
return torch.FloatTensor(data.astype(np.float32)), sr
def read_binary_audio(bin_data, tar_sr=None):
"""
read binary audio (`bytes` or `uint8` `numpy.ndarray`) to `float32`
`numpy.ndarray`
RETURNS:
        data (torch.FloatTensor) : audio of shape (n,) or (2, n)
tar_sr (int) : sample rate
"""
data, ori_sr = sf.read(io.BytesIO(bin_data), dtype='float32')
data = data.T
if (tar_sr is not None) and (ori_sr != tar_sr):
data = librosa.resample(data, ori_sr, tar_sr)
else:
tar_sr = ori_sr
data = np.clip(data, -1, 1)
data = data * 32768.0
return torch.FloatTensor(data.astype(np.float32)), tar_sr
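# Illustrative sketch (not part of the original module): round-trips a short block of
# silence through an in-memory 16 kHz WAV file.
def _demo_read_binary_audio():
    buf = io.BytesIO()
    sf.write(buf, np.zeros(1600, dtype=np.float32), 16000, format='WAV')
    data, sr = read_binary_audio(buf.getvalue(), tar_sr=16000)
    assert sr == 16000 and data.shape == (1600,)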
def load_filepaths_and_text(filename):
with open(filename, encoding='utf-8') as f:
data = [json.loads(line.rstrip()) for line in f]
return data
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
def load_code_dict(path, add_sos=False, add_eos=False):
if not path:
return {}
with open(path, 'r') as f:
codes = ['_'] + [line.rstrip() for line in f] # '_' for pad
code_dict = {c: i for i, c in enumerate(codes)}
if add_sos:
code_dict[SOS_TOK] = len(code_dict)
if add_eos:
code_dict[EOS_TOK] = len(code_dict)
assert(set(code_dict.values()) == set(range(len(code_dict))))
return code_dict
def load_obs_label_dict(path):
if not path:
return {}
with open(path, 'r') as f:
obs_labels = [line.rstrip() for line in f]
return {c: i for i, c in enumerate(obs_labels)}
# A simple timer class inspired from `tnt.TimeMeter`
class CudaTimer:
def __init__(self, keys):
self.keys = keys
self.reset()
def start(self, key):
s = torch.cuda.Event(enable_timing=True)
s.record()
self.start_events[key].append(s)
return self
def stop(self, key):
e = torch.cuda.Event(enable_timing=True)
e.record()
self.end_events[key].append(e)
return self
def reset(self):
self.start_events = collections.defaultdict(list)
self.end_events = collections.defaultdict(list)
self.running_times = collections.defaultdict(float)
self.n = collections.defaultdict(int)
return self
def value(self):
self._synchronize()
return {k: self.running_times[k] / self.n[k] for k in self.keys}
def _synchronize(self):
torch.cuda.synchronize()
for k in self.keys:
starts = self.start_events[k]
ends = self.end_events[k]
if len(starts) == 0:
raise ValueError("Trying to divide by zero in TimeMeter")
if len(ends) != len(starts):
raise ValueError("Call stop before checking value!")
time = 0
for start, end in zip(starts, ends):
time += start.elapsed_time(end)
self.running_times[k] += time * 1e-3
self.n[k] += len(starts)
self.start_events = collections.defaultdict(list)
self.end_events = collections.defaultdict(list)
# Used to measure the time taken for multiple events
class Timer:
def __init__(self, keys):
self.keys = keys
self.n = {}
self.running_time = {}
self.total_time = {}
self.reset()
def start(self, key):
self.running_time[key] = time.time()
return self
def stop(self, key):
self.total_time[key] = time.time() - self.running_time[key]
self.n[key] += 1
self.running_time[key] = None
return self
def reset(self):
for k in self.keys:
self.total_time[k] = 0
self.running_time[k] = None
self.n[k] = 0
return self
def value(self):
vals = {}
for k in self.keys:
if self.n[k] == 0:
raise ValueError("Trying to divide by zero in TimeMeter")
else:
vals[k] = self.total_time[k] / self.n[k]
return vals
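# Illustrative sketch (not part of the original module): Timer reports the average
# wall-clock time per start/stop pair for each named event.
def _demo_timer():
    timer = Timer(['step'])
    for _ in range(3):
        timer.start('step')
        time.sleep(0.01)
        timer.stop('step')
    return timer.value()  # e.g. {'step': ~0.01}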
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/utils.py |
""" from https://github.com/keithito/tacotron """
import numpy as np
import re
from . import cleaners
from .symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Special symbols
SOS_TOK = '<s>'
EOS_TOK = '</s>'
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
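# Illustrative sketch (not part of the original module): plain text and an embedded
# ARPAbet span survive a round trip through text_to_sequence / sequence_to_text.
def _demo_text_sequence_roundtrip():
    seq = text_to_sequence('Hello, {HH AH0 L OW1}!', ['english_cleaners'])
    assert sequence_to_text(seq) == 'hello, {HH AH0 L OW1}!'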
def sample_code_chunk(code, size):
assert(size > 0 and size <= len(code))
start = np.random.randint(len(code) - size + 1)
end = start + size
return code[start:end], start, end
def code_to_sequence(code, code_dict, collapse_code):
if collapse_code:
prev_c = None
sequence = []
for c in code:
if c in code_dict and c != prev_c:
sequence.append(code_dict[c])
prev_c = c
else:
sequence = [code_dict[c] for c in code if c in code_dict]
if len(sequence) < 0.95 * len(code):
        print('WARNING: over 5% of codes are OOV')
return sequence
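# Illustrative sketch (not part of the original module): with collapse_code=True,
# consecutive repeated codes are merged before mapping to IDs (the length check above
# may then print its OOV warning, which is cosmetic here).
def _demo_code_to_sequence():
    code_dict = {'10': 1, '20': 2}
    assert code_to_sequence(['10', '10', '20'], code_dict, collapse_code=False) == [1, 1, 2]
    assert code_to_sequence(['10', '10', '20'], code_dict, collapse_code=True) == [1, 2]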
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def sequence_to_code(sequence, code_dict):
'''Analogous to sequence_to_text'''
id_to_code = {i: c for c, i in code_dict.items()}
return ' '.join([id_to_code[i] for i in sequence])
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/text.py |
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
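# Illustrative sketch (not part of the original module): english_cleaners transliterates,
# lowercases, and expands numbers and abbreviations in one pass.
def _demo_english_cleaners():
    assert english_cleaners('Dr. Smith paid $5.') == 'doctor smith paid five dollars.'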
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/cleaners.py |
import torch
from librosa.filters import mel as librosa_mel_fn
from .audio_processing import dynamic_range_compression
from .audio_processing import dynamic_range_decompression
from .stft import STFT
from .utils import get_mask_from_lengths
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class GlobalAvgPool(torch.nn.Module):
def __init__(self):
super(GlobalAvgPool, self).__init__()
def forward(self, x, lengths=None):
"""Average pooling across time steps (dim=1) with optionally lengths.
Args:
x: torch.Tensor of shape (N, T, ...)
lengths: None or torch.Tensor of shape (N,)
dim: dimension to pool
"""
if lengths is None:
return x.mean(dim=1, keepdim=False)
else:
mask = get_mask_from_lengths(lengths).type(x.type()).to(x.device)
mask_shape = list(mask.size()) + [1 for _ in range(x.ndimension()-2)]
mask = mask.reshape(*mask_shape)
numer = (x * mask).sum(dim=1, keepdim=False)
denom = mask.sum(dim=1, keepdim=False)
return numer / denom
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
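# Illustrative sketch (not part of the original module): one second of silence at the
# default 22.05 kHz settings, assuming the older librosa mel-filter API this module
# was written against.
def _demo_mel_spectrogram():
    stft = TacotronSTFT()
    mel = stft.mel_spectrogram(torch.zeros(1, 22050))
    assert mel.shape[:2] == (1, 80)  # (B, n_mel_channels, T)
    return mel.shape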
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/layers.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Sample from a trained LM; hacked fairseq-interactive
"""
from collections import namedtuple
import os
import ast
import numpy as np
from fairseq import checkpoint_utils, options, tasks, utils
import tqdm
Batch = namedtuple('Batch', 'ids src_tokens src_lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def make_batches(lines, args, task, max_positions):
tokens = [
task.source_dictionary.encode_line(
src_str, add_if_not_exist=False
).long()
for src_str in lines
]
lengths = [t.numel() for t in tokens]
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths),
max_tokens=args.dataset.max_tokens,
max_sentences=args.dataset.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=args.dataset.skip_invalid_size_inputs_valid_test
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
ids=batch['id'],
src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths'],
)
def main(args):
arg_prompts = args.prompts
arg_output = args.output
arg_debug = args.debug
arg_sample_size = args.samples_per_prompt
try:
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
args = convert_namespace_to_omegaconf(args)
    except Exception:
pass
# if args.max_tokens is None and args.max_sentences is None:
if args.common.seed is not None:
np.random.seed(args.common.seed)
utils.set_torch_seed(args.common.seed)
if args.generation.sampling:
args.generation.nbest = args.generation.beam = arg_sample_size
task = tasks.setup_task(args.task)
overrides = ast.literal_eval(args.common_eval.model_overrides)
models, _model_args = checkpoint_utils.load_model_ensemble(
args.common_eval.path.split(os.pathsep),
arg_overrides=overrides,
task=task,
suffix=getattr(args, "checkpoint_suffix", ""),
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
model.prepare_for_inference_(args)
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.generation.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
)
output_file = open(arg_output, 'w')
with open(arg_prompts, 'r') as fin:
lines = fin.readlines()
split = [x.split('|', 1) for x in lines]
seq_id = [x[0] for x in split]
prompts = [x[1] for x in split]
if args.generation.prefix_size >= 0:
prompts = [' '.join(l.split()[:args.generation.prefix_size])
for l in prompts]
if arg_debug:
prompts = prompts[:10]
generator = task.build_generator(models, args.generation)
start_id = 0
pbar = tqdm.tqdm(total=len(prompts))
for batch in make_batches(prompts, args, task, max_positions):
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
sample = {
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
},
}
results = []
translations = task.inference_step(generator, models, sample)
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
results.append((i + start_id, src_tokens_i, hypos))
# sort output to match input order
for id, src_tokens, hypos in sorted(results, key=lambda x: x[0]):
if src_dict is not None:
src_str = src_dict.string(
src_tokens, args.common_eval.post_process)
# Process top predictions
for hypo_id, hypo in enumerate(hypos):
_hypo_tokens, hypo_str, _alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.common_eval.post_process,
)
detok_hypo_str = hypo_str
utterance = detok_hypo_str
print(f'{seq_id[id]}__{hypo_id}|{utterance}', file=output_file)
pbar.update(1)
start_id += len(results)
# output_file.close()
def cli_main():
parser = options.get_interactive_generation_parser()
parser.add_argument('--prompts', type=str, default=None, required=True)
parser.add_argument('--output', type=str, default=None, required=True)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--samples-per-prompt', type=int, default=1)
args = options.parse_args_and_arch(parser)
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
main(args)
if __name__ == '__main__':
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/textless_nlp/gslm/ulm/sample.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
from omegaconf import OmegaConf
from fairseq.criterions.model_criterion import ModelCriterionConfig
from fairseq.dataclass.configs import FairseqConfig
from tasks import ImageClassificationConfig, ImagePretrainingConfig
from models.data2vec_image_classification import (
Data2VecImageClassificationConfig,
Data2VecImageClassificationModel,
)
from models.data2vec_vision import Data2VecVisionConfig, Data2VecVisionModel
def get_parser():
parser = argparse.ArgumentParser(
description="convert beit checkpoint into data2vec - vision checkpoint"
)
# fmt: off
parser.add_argument('checkpoint', help='checkpoint to convert')
parser.add_argument('--output', required=True, metavar='PATH', help='where to output converted checkpoint')
parser.add_argument('--type', type=str, choices=['vision', 'image_classification'], default='image_classification', help='type of model to upgrade')
parser.add_argument('--inception_norms', action='store_true', default=False)
# fmt: on
return parser
def update_checkpoint(model_dict, prefix, is_nested):
replace_paths = {
"cls_token": "model.cls_emb" if is_nested else "cls_emb",
"patch_embed": "model.patch_embed" if is_nested else "patch_embed",
"mask_token": "mask_emb",
}
starts_with = {
"patch_embed.proj": "model.patch_embed.conv"
if is_nested
else "patch_embed.conv",
"lm_head": "final_proj",
"fc_norm": "fc_norm",
"head": "head",
}
partial = {
"mlp.fc1": "mlp.0",
"mlp.fc2": "mlp.2",
}
for k in list(model_dict.keys()):
for sw, r in starts_with.items():
if k.startswith(sw):
replace_paths[k] = k.replace(sw, r)
for p, r in partial.items():
if p in k:
replace_paths[k] = prefix + k.replace(p, r)
if prefix != "":
for k in list(model_dict.keys()):
if k not in replace_paths:
replace_paths[k] = prefix + k
for k in list(model_dict.keys()):
if k in replace_paths:
model_dict[replace_paths[k]] = model_dict[k]
if k != replace_paths[k]:
del model_dict[k]
return model_dict
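# Illustrative sketch (not part of this script): a hypothetical BEiT-style state dict
# showing how keys are renamed and prefixed for the nested (image classification) case.
def _demo_update_checkpoint():
    sd = {"cls_token": 0, "patch_embed.proj.weight": 1, "blocks.0.mlp.fc1.weight": 2}
    out = update_checkpoint(dict(sd), prefix="model.encoder.", is_nested=True)
    assert "model.cls_emb" in out
    assert "model.patch_embed.conv.weight" in out
    assert "model.encoder.blocks.0.mlp.0.weight" in out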
def main():
parser = get_parser()
args = parser.parse_args()
cp = torch.load(args.checkpoint, map_location="cpu")
cfg = FairseqConfig(
criterion=ModelCriterionConfig(_name="model", log_keys=["correct"]),
)
if args.type == "image_classification":
cfg.task = ImageClassificationConfig(
_name="image_classification",
data=".",
)
if args.inception_norms:
cfg.task.normalization_mean = [0.5, 0.5, 0.5]
cfg.task.normalization_std = [0.5, 0.5, 0.5]
cfg.model = Data2VecImageClassificationConfig(
_name="data2vec_image_classification",
)
cfg.model.pretrained_model_args = FairseqConfig(
model=Data2VecVisionConfig(
_name="data2vec_vision", shared_rel_pos_bias=False
),
task=ImagePretrainingConfig(
_name="image_pretraining",
),
)
cfg = OmegaConf.create(cfg)
state = {
"cfg": OmegaConf.to_container(cfg, resolve=True, enum_to_str=True),
"model": cp["module"],
"best_loss": None,
"optimizer": None,
"extra_state": {},
}
model = Data2VecImageClassificationModel(cfg.model)
model.load_state_dict(
update_checkpoint(state["model"], prefix="model.encoder.", is_nested=True),
strict=True,
)
elif args.type == "vision":
cfg.task = ImagePretrainingConfig(
_name="image_pretraining",
data=".",
)
if args.inception_norms:
cfg.task.normalization_mean = [0.5, 0.5, 0.5]
cfg.task.normalization_std = [0.5, 0.5, 0.5]
cfg.model = Data2VecVisionConfig(
_name="data2vec_vision",
)
cfg = OmegaConf.create(cfg)
state = {
"cfg": OmegaConf.to_container(cfg, resolve=True, enum_to_str=True),
"model": cp["model"],
"best_loss": None,
"optimizer": None,
"extra_state": {},
}
model = Data2VecVisionModel(cfg.model)
model.load_state_dict(
update_checkpoint(state["model"], prefix="encoder.", is_nested=False),
strict=True,
)
else:
raise Exception("unsupported type " + args.type)
print(state["cfg"], state.keys())
torch.save(state, args.output)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/fb_convert_beit_cp.py |
EXA-1-master | exa/libraries/fairseq/examples/data2vec/__init__.py |
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import sys
import torch
from typing import Optional
from dataclasses import dataclass, field
from omegaconf import MISSING
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from fairseq.logging import metrics
try:
from ..data import MaeFinetuningImageDataset
except:
sys.path.append("..")
from data import MaeFinetuningImageDataset
logger = logging.getLogger(__name__)
@dataclass
class MaeImageClassificationConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
input_size: int = 224
local_cache_path: Optional[str] = None
rebuild_batches: bool = True
@register_task("mae_image_classification", dataclass=MaeImageClassificationConfig)
class MaeImageClassificationTask(FairseqTask):
""" """
cfg: MaeImageClassificationConfig
@classmethod
def setup_task(cls, cfg: MaeImageClassificationConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (AudioPretrainingConfig): configuration of this task
"""
return cls(cfg)
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
cfg = task_cfg or self.cfg
self.datasets[split] = MaeFinetuningImageDataset(
root=data_path,
split=split,
is_train=split == "train",
input_size=cfg.input_size,
local_cache_path=cfg.local_cache_path,
shuffle=split == "train",
)
def build_model(self, model_cfg: FairseqDataclass, from_checkpoint=False):
model = super().build_model(model_cfg, from_checkpoint)
actualized_cfg = getattr(model, "cfg", None)
if actualized_cfg is not None:
if hasattr(actualized_cfg, "pretrained_model_args"):
model_cfg.pretrained_model_args = actualized_cfg.pretrained_model_args
return model
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if "correct" in logging_outputs[0]:
zero = torch.scalar_tensor(0.0)
correct = sum(log.get("correct", zero) for log in logging_outputs)
metrics.log_scalar_sum("_correct", correct)
metrics.log_derived(
"accuracy",
lambda meters: 100 * meters["_correct"].sum / meters["sample_size"].sum,
)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
def max_positions(self):
"""Maximum input length supported by the encoder."""
return sys.maxsize, sys.maxsize
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/tasks/mae_image_classification.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os.path as osp
import logging
from dataclasses import dataclass
import torch
from torchvision import transforms
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.logging import metrics
try:
from ..data import ImageDataset
except:
import sys
sys.path.append("..")
from data import ImageDataset
from .image_pretraining import (
ImagePretrainingConfig,
ImagePretrainingTask,
IMG_EXTENSIONS,
)
logger = logging.getLogger(__name__)
@dataclass
class ImageClassificationConfig(ImagePretrainingConfig):
pass
@register_task("image_classification", dataclass=ImageClassificationConfig)
class ImageClassificationTask(ImagePretrainingTask):
cfg: ImageClassificationConfig
@classmethod
def setup_task(cls, cfg: ImageClassificationConfig, **kwargs):
return cls(cfg)
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
cfg = task_cfg or self.cfg
path_with_split = osp.join(data_path, split)
if osp.exists(path_with_split):
data_path = path_with_split
from timm.data import create_transform
if split == "train":
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=cfg.input_size,
is_training=True,
auto_augment="rand-m9-mstd0.5-inc1",
interpolation="bicubic",
re_prob=0.25,
re_mode="pixel",
re_count=1,
mean=cfg.normalization_mean,
std=cfg.normalization_std,
)
if not cfg.input_size > 32:
transform.transforms[0] = transforms.RandomCrop(
cfg.input_size, padding=4
)
else:
t = []
if cfg.input_size > 32:
crop_pct = 1
if cfg.input_size < 384:
crop_pct = 224 / 256
size = int(cfg.input_size / crop_pct)
t.append(
transforms.Resize(
size, interpolation=3
), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(cfg.input_size))
t.append(transforms.ToTensor())
t.append(
transforms.Normalize(cfg.normalization_mean, cfg.normalization_std)
)
transform = transforms.Compose(t)
logger.info(transform)
self.datasets[split] = ImageDataset(
root=data_path,
extensions=IMG_EXTENSIONS,
load_classes=True,
transform=transform,
)
for k in self.datasets.keys():
if k != split:
assert self.datasets[k].classes == self.datasets[split].classes
def build_model(self, model_cfg: FairseqDataclass, from_checkpoint=False):
model = super().build_model(model_cfg, from_checkpoint)
actualized_cfg = getattr(model, "cfg", None)
if actualized_cfg is not None:
if hasattr(actualized_cfg, "pretrained_model_args"):
model_cfg.pretrained_model_args = actualized_cfg.pretrained_model_args
return model
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if "correct" in logging_outputs[0]:
zero = torch.scalar_tensor(0.0)
correct = sum(log.get("correct", zero) for log in logging_outputs)
metrics.log_scalar_sum("_correct", correct)
metrics.log_derived(
"accuracy",
lambda meters: 100 * meters["_correct"].sum / meters["sample_size"].sum,
)
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/tasks/image_classification.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import sys
from dataclasses import dataclass
from typing import Optional, List
from omegaconf import II
from fairseq.data.iterators import GroupedEpochBatchIterator
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from fairseq.tasks.audio_pretraining import AudioPretrainingConfig, AudioPretrainingTask
from fairseq.tasks.masked_lm import MaskedLMConfig, MaskedLMTask
from .mae_image_pretraining import MaeImagePretrainingConfig, MaeImagePretrainingTask
from examples.data2vec.data.modality import Modality
from fairseq.data.audio.multi_modality_dataset import (
MultiModalityDataset,
ModalityDatasetItem,
)
@dataclass
class MultimodalPretrainingConfig(FairseqDataclass):
audio: Optional[AudioPretrainingConfig] = None
image: Optional[MaeImagePretrainingConfig] = None
text: Optional[MaskedLMConfig] = None
audio_ratio: float = 1
image_ratio: float = 1
text_ratio: float = 1
max_tokens: Optional[int] = II("dataset.max_tokens")
batch_size: Optional[int] = II("dataset.batch_size")
update_freq: List[int] = II("optimization.update_freq")
rebuild_batches: bool = True
@register_task("multimodal_pretraining", dataclass=MultimodalPretrainingConfig)
class MultimodalPretrainingTask(FairseqTask):
""" """
cfg: MultimodalPretrainingConfig
def __init__(self, cfg: MultimodalPretrainingConfig):
super().__init__(cfg)
self.audio_task = (
AudioPretrainingTask(cfg.audio) if cfg.audio is not None else None
)
self.image_task = (
MaeImagePretrainingTask(cfg.image) if cfg.image is not None else None
)
self.text_task = MaskedLMTask(cfg.text) if cfg.text is not None else None
self.mult_ratios = []
@classmethod
def setup_task(cls, cfg: MultimodalPretrainingConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
            cfg (MultimodalPretrainingConfig): configuration of this task
"""
return cls(cfg)
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
datasets = []
self.mult_ratios = []
def load_ds(task, name, ratio):
if task is not None:
task.load_dataset(split)
ds = ModalityDatasetItem(
datasetname=name,
dataset=task.dataset(split),
max_positions=task.max_positions(),
max_tokens=self.cfg.max_tokens,
max_sentences=self.cfg.batch_size,
)
datasets.append(ds)
self.mult_ratios.append(ratio)
load_ds(self.audio_task, Modality.AUDIO, self.cfg.audio_ratio)
load_ds(self.image_task, Modality.IMAGE, self.cfg.image_ratio)
load_ds(self.text_task, Modality.TEXT, self.cfg.text_ratio)
assert len(datasets) > 0
self.datasets[split] = MultiModalityDataset(datasets)
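        # MultiModalityDataset wraps the per-modality datasets; the ratios collected
        # above are applied when the batch samplers are built in get_batch_iterator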
@property
def supported_modalities(self):
modalities = []
if self.cfg.text is not None:
modalities.append(Modality.TEXT)
if self.cfg.audio is not None:
modalities.append(Modality.AUDIO)
if self.cfg.image is not None:
modalities.append(Modality.IMAGE)
return modalities
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=0,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
batch_samplers = dataset.get_batch_samplers(
self.mult_ratios, required_batch_size_multiple, seed
)
# return a reusable, sharded iterator
epoch_iter = GroupedEpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_samplers=batch_samplers,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
mult_rate=max(self.cfg.update_freq),
buffer_size=data_buffer_size,
skip_remainder_batch=skip_remainder_batch,
)
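        # mult_rate=max(update_freq) draws that many consecutive batches from the
        # same modality, so each gradient-accumulation step stays single-modality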
self.dataset_to_epoch_iter[dataset] = {} # refresh it every epoch
return epoch_iter
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
def max_positions(self):
"""Maximum input length supported by the encoder."""
return sys.maxsize, sys.maxsize
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/tasks/multimodal.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import numpy as np
import math
import torch
from sklearn import metrics as sklearn_metrics
from dataclasses import dataclass
from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig
from fairseq.tasks import register_task
from fairseq.logging import metrics
from ..data.add_class_target_dataset import AddClassTargetDataset
logger = logging.getLogger(__name__)
@dataclass
class AudioClassificationConfig(AudioPretrainingConfig):
label_descriptors: str = "label_descriptors.csv"
labels: str = "lbl"
@register_task("audio_classification", dataclass=AudioClassificationConfig)
class AudioClassificationTask(AudioPretrainingTask):
""" """
cfg: AudioClassificationConfig
def __init__(
self,
cfg: AudioClassificationConfig,
):
super().__init__(cfg)
self.state.add_factory("labels", self.load_labels)
def load_labels(self):
labels = {}
path = os.path.join(self.cfg.data, self.cfg.label_descriptors)
with open(path, "r") as ldf:
for line in ldf:
if line.strip() == "":
continue
items = line.split(",")
idx = items[0]
lbl = items[1]
assert lbl not in labels, lbl
labels[lbl] = idx
return labels
@property
def labels(self):
return self.state.labels
def load_dataset(
self, split: str, task_cfg: AudioClassificationConfig = None, **kwargs
):
super().load_dataset(split, task_cfg, **kwargs)
task_cfg = task_cfg or self.cfg
data_path = self.cfg.data
label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}")
skipped_indices = getattr(self.datasets[split], "skipped_indices", set())
labels = []
with open(label_path, "r") as f:
for i, line in enumerate(f):
if i not in skipped_indices:
lbl_items = line.rstrip().split("\t")
labels.append([int(x) for x in lbl_items[2].split(",")])
assert len(labels) == len(self.datasets[split]), (
f"labels length ({len(labels)}) and dataset length "
f"({len(self.datasets[split])}) do not match"
)
self.datasets[split] = AddClassTargetDataset(
self.datasets[split],
labels,
multi_class=True,
add_to_input=True,
num_classes=len(self.labels),
)
def calculate_stats(self, output, target):
classes_num = target.shape[-1]
stats = []
        # Accuracy is only meaningful for single-label classification (e.g. ESC-50), not for multi-label datasets such as AudioSet
# acc = sklearn_metrics.accuracy_score(np.argmax(target, 1), np.argmax(output, 1))
# Class-wise statistics
for k in range(classes_num):
# Average precision
avg_precision = sklearn_metrics.average_precision_score(
target[:, k], output[:, k], average=None
)
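            # AP is undefined (NaN) for classes with no positives in the eval set;
            # those classes are ignored via np.nanmean when mAP is reported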
            stat_dict = {
                "AP": avg_precision,
            }
# # AUC
# try:
# auc = sklearn_metrics.roc_auc_score(target[:, k], output[:, k], average=None)
# except:
# auc = 0
#
# # Precisions, recalls
# (precisions, recalls, thresholds) = sklearn_metrics.precision_recall_curve(
# target[:, k], output[:, k]
# )
#
# # FPR, TPR
# (fpr, tpr, thresholds) = sklearn_metrics.roc_curve(target[:, k], output[:, k])
#
# save_every_steps = 1000 # Sample statistics to reduce size
# dict = {
# "precisions": precisions[0::save_every_steps],
# "recalls": recalls[0::save_every_steps],
# "AP": avg_precision,
# "fpr": fpr[0::save_every_steps],
# "fnr": 1.0 - tpr[0::save_every_steps],
# "auc": auc,
# # note acc is not class-wise, this is just to keep consistent with other metrics
# "acc": acc,
# }
            stats.append(stat_dict)
return stats
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if "_predictions" in logging_outputs[0]:
metrics.log_concat_tensor(
"_predictions",
torch.cat([l["_predictions"].cpu() for l in logging_outputs], dim=0),
)
metrics.log_concat_tensor(
"_targets",
torch.cat([l["_targets"].cpu() for l in logging_outputs], dim=0),
)
def compute_stats(meters):
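                # skip mAP until enough predictions have accumulated; AP over a
                # handful of clips is too noisy to be meaningful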
if meters["_predictions"].tensor.shape[0] < 100:
return 0
stats = self.calculate_stats(
meters["_predictions"].tensor, meters["_targets"].tensor
)
return np.nanmean([stat["AP"] for stat in stats])
metrics.log_derived("mAP", compute_stats)
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/tasks/audio_classification.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .image_pretraining import ImagePretrainingTask, ImagePretrainingConfig
from .image_classification import ImageClassificationTask, ImageClassificationConfig
from .mae_image_pretraining import MaeImagePretrainingTask, MaeImagePretrainingConfig
__all__ = [
"ImageClassificationTask",
"ImageClassificationConfig",
"ImagePretrainingTask",
"ImagePretrainingConfig",
"MaeImagePretrainingTask",
"MaeImagePretrainingConfig",
] | EXA-1-master | exa/libraries/fairseq/examples/data2vec/tasks/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import sys
import os.path as osp
from dataclasses import dataclass, field
from typing import List
from omegaconf import MISSING
import torch
from torchvision import transforms
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
try:
from ..data import ImageDataset
except ImportError:
sys.path.append("..")
from data import ImageDataset
logger = logging.getLogger(__name__)
IMG_EXTENSIONS = {
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
}
@dataclass
class ImagePretrainingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
input_size: int = 224
normalization_mean: List[float] = (0.485, 0.456, 0.406)
normalization_std: List[float] = (0.229, 0.224, 0.225)
@register_task("image_pretraining", dataclass=ImagePretrainingConfig)
class ImagePretrainingTask(FairseqTask):
""" """
cfg: ImagePretrainingConfig
@classmethod
def setup_task(cls, cfg: ImagePretrainingConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
            cfg (ImagePretrainingConfig): configuration of this task
"""
return cls(cfg)
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
cfg = task_cfg or self.cfg
path_with_split = osp.join(data_path, split)
if osp.exists(path_with_split):
data_path = path_with_split
transform = transforms.Compose(
[
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomResizedCrop(
size=cfg.input_size,
interpolation=transforms.InterpolationMode.BICUBIC,
),
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(cfg.normalization_mean),
std=torch.tensor(cfg.normalization_std),
),
]
)
logger.info(transform)
self.datasets[split] = ImageDataset(
root=data_path,
extensions=IMG_EXTENSIONS,
load_classes=False,
transform=transform,
)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
def max_positions(self):
"""Maximum input length supported by the encoder."""
return sys.maxsize, sys.maxsize
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/tasks/image_pretraining.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import sys
from typing import Optional, List
from dataclasses import dataclass, field
from omegaconf import MISSING, II
from fairseq.data import SubsampleDataset
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
try:
from ..data import MaeImageDataset
except ImportError:
sys.path.append("..")
from data import MaeImageDataset
logger = logging.getLogger(__name__)
@dataclass
class ImageMaskingConfig:
patch_size: int = II("model.modalities.image.patch_size")
mask_prob: float = II("model.modalities.image.mask_prob")
mask_prob_adjust: float = II("model.modalities.image.mask_prob_adjust")
mask_length: int = II("model.modalities.image.mask_length")
inverse_mask: bool = II("model.modalities.image.inverse_mask")
mask_dropout: float = II("model.modalities.image.mask_dropout")
clone_batch: int = II("model.clone_batch")
expand_adjacent: bool = False
non_overlapping: bool = False
@dataclass
class MaeImagePretrainingConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
multi_data: Optional[List[str]] = None
input_size: int = 224
local_cache_path: Optional[str] = None
key: str = "imgs"
beit_transforms: bool = False
target_transform: bool = False
no_transform: bool = False
rebuild_batches: bool = True
precompute_mask_config: Optional[ImageMaskingConfig] = None
subsample: float = 1
seed: int = II("common.seed")
dataset_type: str = "imagefolder"
@register_task("mae_image_pretraining", dataclass=MaeImagePretrainingConfig)
class MaeImagePretrainingTask(FairseqTask):
""" """
cfg: MaeImagePretrainingConfig
@classmethod
def setup_task(cls, cfg: MaeImagePretrainingConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
            cfg (MaeImagePretrainingConfig): configuration of this task
"""
return cls(cfg)
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
cfg = task_cfg or self.cfg
compute_mask = cfg.precompute_mask_config is not None
mask_args = {}
if compute_mask:
mask_args = cfg.precompute_mask_config
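        # when set, the masking hyper-parameters are forwarded to MaeImageDataset so
        # masks are precomputed per sample (including clone_batch copies) in the data
        # pipeline rather than in the model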
self.datasets[split] = MaeImageDataset(
root=data_path if cfg.multi_data is None else cfg.multi_data,
split=split,
input_size=cfg.input_size,
local_cache_path=cfg.local_cache_path,
key=cfg.key,
beit_transforms=cfg.beit_transforms,
target_transform=cfg.target_transform,
no_transform=cfg.no_transform,
compute_mask=compute_mask,
dataset_type=cfg.dataset_type,
**mask_args,
)
if cfg.subsample < 1:
self.datasets[split] = SubsampleDataset(
self.datasets[split],
cfg.subsample,
shuffle=True,
seed=cfg.seed,
)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
def max_positions(self):
"""Maximum input length supported by the encoder."""
return sys.maxsize, sys.maxsize
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/tasks/mae_image_pretraining.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# The code in this file is adapted from the BeiT implementation which can be found here:
# https://github.com/microsoft/unilm/tree/master/beit
import logging
from dataclasses import dataclass
from typing import Any
from omegaconf import II, MISSING
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, tasks
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.roberta.model import RobertaClassificationHead
from examples.data2vec.data.modality import Modality
logger = logging.getLogger(__name__)
@dataclass
class Data2VecTextClassificationConfig(FairseqDataclass):
pooler_dropout: float = 0.0
pooler_activation_fn: str = "tanh"
quant_noise_pq: int = 0
quant_noise_pq_block_size: int = 8
spectral_norm_classification_head: bool = False
model_path: str = MISSING
no_pretrained_weights: bool = False
pretrained_model_args: Any = None
@register_model(
"data2vec_text_classification", dataclass=Data2VecTextClassificationConfig
)
class Data2VecTextClassificationModel(BaseFairseqModel):
def __init__(self, cfg: Data2VecTextClassificationConfig):
super().__init__()
self.cfg = cfg
if cfg.pretrained_model_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.model_path, {})
pretrained_args = state.get("cfg", None)
pretrained_args.criterion = None
pretrained_args.lr_scheduler = None
cfg.pretrained_model_args = pretrained_args
logger.info(pretrained_args)
else:
state = None
pretrained_args = cfg.pretrained_model_args
task = tasks.setup_task(pretrained_args.task)
model = task.build_model(pretrained_args.model, from_checkpoint=True)
model.remove_pretraining_modules()
self.model = model
if state is not None and not cfg.no_pretrained_weights:
self.load_model_weights(state, model, cfg)
self.classification_heads = nn.ModuleDict()
def load_model_weights(self, state, model, cfg):
for k in list(state["model"].keys()):
if (
k.startswith("shared_decoder") or
k.startswith("_ema") or
"decoder" in k
):
logger.info(f"Deleting {k} from checkpoint")
del state["model"][k]
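        # only the student encoder weights remain, so loading can be strict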
model.load_state_dict(state["model"], strict=True)
@classmethod
def build_model(cls, cfg: Data2VecTextClassificationConfig, task=None):
"""Build a new model instance."""
return cls(cfg)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
embed_dim = self.cfg.pretrained_model_args.model.embed_dim
self.classification_heads[name] = RobertaClassificationHead(
input_dim=embed_dim,
inner_dim=inner_dim or embed_dim,
num_classes=num_classes,
activation_fn=self.cfg.pooler_activation_fn,
pooler_dropout=self.cfg.pooler_dropout,
q_noise=self.cfg.quant_noise_pq,
qn_block_size=self.cfg.quant_noise_pq_block_size,
do_spectral_norm=self.cfg.spectral_norm_classification_head,
)
def forward(
self,
source,
id,
padding_mask,
features_only=True,
remove_extra_tokens=True,
classification_head_name=None,
):
encoder_out = self.model(
source,
id=id,
mode=Modality.TEXT,
padding_mask=padding_mask,
mask=False,
features_only=features_only,
remove_extra_tokens=remove_extra_tokens
)
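        # RobertaClassificationHead pools the first time step (the <s>/CLS token)
        # before projecting to the number of classes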
logits = self.classification_heads[classification_head_name](encoder_out["x"])
return logits, encoder_out
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/data2vec_text_classification.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.modules import EMAModule, EMAModuleConfig
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
)
from fairseq.models.roberta.model import RobertaLMHead, RobertaClassificationHead
from fairseq.models.transformer import TransformerEncoder, TransformerConfig
from fairseq.modules.transformer_sentence_encoder import init_bert_params
logger = logging.getLogger(__name__)
@dataclass
class Data2VecTextConfig(FairseqDataclass):
max_positions: int = II("task.tokens_per_sample")
head_layers: int = 1
transformer: TransformerConfig = TransformerConfig()
load_checkpoint_heads: bool = field(
default=False,
metadata={"help": "(re-)register and load heads when loading checkpoints"},
)
loss_beta: float = field(
default=0, metadata={"help": "beta for smooth l1 loss. 0 means use l2 loss"}
)
loss_scale: Optional[float] = field(
default=None,
metadata={
"help": "scale the reconstruction loss by this constant. if None then scales by 1/sqrt(dim)"
},
)
average_top_k_layers: int = field(
default=8, metadata={"help": "how many layers to average"}
)
layer_norm_target_layer: bool = False
instance_norm_target_layer: bool = False
batch_norm_target_layer: bool = False
instance_norm_targets: bool = False
layer_norm_targets: bool = False
ema_decay: float = field(default=0.999, metadata={"help": "initial ema decay rate"})
ema_end_decay: float = field(
default=0.9999, metadata={"help": "final ema decay rate"}
)
# when to finish annealing ema decay rate
ema_anneal_end_step: int = II("optimization.max_update")
ema_transformer_layers_only: bool = field(
default=True,
metadata={"help": "whether to momentum update only the transformer layers"},
)
def get_annealed_rate(start, end, curr_step, total_steps):
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining
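# e.g. get_annealed_rate(0.999, 0.9999, 50_000, 100_000) == 0.99945: the decay is
# annealed linearly from `start` to `end` over `total_steps` updates.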
@register_model("data2vec_text", dataclass=Data2VecTextConfig)
class Data2VecTextModel(FairseqEncoderModel):
def __init__(self, cfg: Data2VecTextConfig, encoder):
super().__init__(encoder)
self.cfg = cfg
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@classmethod
def build_model(cls, cfg, task):
"""Build a new model instance."""
encoder = Data2VecTextEncoder(cfg, task.source_dictionary, task.cfg.data)
return cls(cfg, encoder)
def forward(
self,
src_tokens,
target_tokens=None,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
**kwargs,
):
if classification_head_name is not None:
features_only = True
res = self.encoder(
src_tokens, target_tokens, features_only, return_all_hiddens, **kwargs
)
if isinstance(res, tuple):
x, extra = res
else:
return res
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = RobertaClassificationHead(
input_dim=self.cfg.transformer.encoder.embed_dim,
inner_dim=inner_dim or self.cfg.transformer.encoder.embed_dim,
num_classes=num_classes,
activation_fn="tanh",
pooler_dropout=0,
)
@property
def supported_targets(self):
return {"self"}
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
# rename decoder -> encoder before upgrading children modules
for k in list(state_dict.keys()):
if k.startswith(prefix + "decoder"):
new_k = prefix + "encoder" + k[len(prefix + "decoder") :]
state_dict[new_k] = state_dict[k]
del state_dict[k]
# rename emb_layer_norm -> layernorm_embedding
for k in list(state_dict.keys()):
if ".emb_layer_norm." in k:
new_k = k.replace(".emb_layer_norm.", ".layernorm_embedding.")
state_dict[new_k] = state_dict[k]
del state_dict[k]
if self.encoder.regression_head is not None:
if ".lm_head." in k:
new_k = k.replace(".lm_head.", ".regression_head.")
state_dict[new_k] = state_dict[k]
del state_dict[k]
else:
if ".regression_head." in k:
del state_dict[k]
# upgrade children modules
super().upgrade_state_dict_named(state_dict, name)
# Handle new classification heads present in the state dict.
current_head_names = (
[]
if not hasattr(self, "classification_heads")
or self.classification_heads is None
else self.classification_heads.keys()
)
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if self.cfg.load_checkpoint_heads:
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if (
hasattr(self, "classification_heads")
and self.classification_heads is not None
and len(self.classification_heads) > 0
):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
for k in list(state_dict.keys()):
if k.startswith(prefix + "encoder.lm_head.") or k.startswith(
prefix + "encoder.emb_head."
):
del state_dict[k]
self.encoder.lm_head = None
        if getattr(self.encoder, "target_model", None) is None:
for k in list(state_dict.keys()):
if k.startswith(prefix + "encoder.target_model."):
del state_dict[k]
if (self.encoder.ema is None) and (prefix + "encoder._ema" in state_dict):
del state_dict[prefix + "encoder._ema"]
def remove_pretraining_modules(self, last_layer=None):
self.encoder.lm_head = None
self.encoder.regression_head = None
self.encoder.ema = None
self.classification_heads = None
if last_layer is not None:
self.encoder.sentence_encoder.layers = nn.ModuleList(
l
for i, l in enumerate(self.encoder.sentence_encoder.layers)
if i <= last_layer
)
self.encoder.sentence_encoder.layer_norm = None
class Data2VecTextEncoder(FairseqEncoder):
def __init__(self, cfg: Data2VecTextConfig, dictionary, task_data):
super().__init__(dictionary)
self.cfg = cfg
embed_tokens = self.build_embedding(
len(dictionary), cfg.transformer.encoder.embed_dim, dictionary.pad()
)
self.sentence_encoder = self.build_encoder(cfg, dictionary, embed_tokens)
self.mask_idx = dictionary.index("<mask>")
assert self.mask_idx != dictionary.unk(), dictionary.symbols
self.ema = None
self.average_top_k_layers = cfg.average_top_k_layers
self.loss_scale = cfg.loss_scale
assert self.cfg.head_layers >= 1
embed_dim = cfg.transformer.encoder.embed_dim
curr_dim = embed_dim
projs = []
for i in range(self.cfg.head_layers - 1):
next_dim = embed_dim * 2 if i == 0 else curr_dim
projs.append(nn.Linear(curr_dim, next_dim))
projs.append(nn.GELU())
curr_dim = next_dim
projs.append(nn.Linear(curr_dim, embed_dim))
self.regression_head = nn.Sequential(*projs)
self.num_updates = 0
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_encoder(self, cfg, dictionary, embed_tokens):
encoder = TransformerEncoder(cfg.transformer, dictionary, embed_tokens, return_fc=True)
encoder.apply(init_bert_params)
return encoder
def build_lm_head(self, embed_dim, output_dim, activation_fn, weight):
return RobertaLMHead(embed_dim, output_dim, activation_fn, weight)
def make_ema_teacher(self):
ema_config = EMAModuleConfig(
ema_decay=self.cfg.ema_decay,
ema_fp32=True,
)
skip_keys = set()
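        # parameters whose keys land in skip_keys are copied into the teacher
        # verbatim instead of being momentum-averaged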
if self.cfg.ema_transformer_layers_only:
            for k, _ in self.sentence_encoder.embed_tokens.named_parameters():
skip_keys.add(f"embed_tokens.{k}")
for k, _ in self.sentence_encoder.embed_positions.named_parameters():
skip_keys.add(f"embed_positions.{k}")
if self.sentence_encoder.layernorm_embedding is not None:
for (
k,
_,
) in self.sentence_encoder.layernorm_embedding.named_parameters():
skip_keys.add(f"layernorm_embedding.{k}")
if self.sentence_encoder.layer_norm is not None:
for k, _ in self.sentence_encoder.layer_norm.named_parameters():
skip_keys.add(f"layernorm_embedding.{k}")
self.ema = EMAModule(
self.sentence_encoder,
ema_config,
skip_keys=skip_keys,
)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
if self.ema is None and self.regression_head is not None:
            logger.info("making ema teacher")
self.make_ema_teacher()
elif self.training and self.ema is not None:
if self.cfg.ema_decay != self.cfg.ema_end_decay:
if num_updates >= self.cfg.ema_anneal_end_step:
decay = self.cfg.ema_end_decay
else:
decay = get_annealed_rate(
self.cfg.ema_decay,
self.cfg.ema_end_decay,
num_updates,
self.cfg.ema_anneal_end_step,
)
self.ema.set_decay(decay)
if self.ema.get_decay() < 1:
self.ema.step(self.sentence_encoder)
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = super().state_dict(destination, prefix, keep_vars)
if self.ema is not None:
state[prefix + "_ema"] = self.ema.fp32_params
return state
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
if self.ema is not None:
k = prefix + "_ema"
assert k in state_dict
self.ema.restore(state_dict[k], True)
del state_dict[k]
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
def forward(
self,
src_tokens,
target_tokens=None,
features_only=False,
return_all_hiddens=False,
masked_tokens=None,
**unused,
):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
        Returns:
            tuple or dict:
                - if `features_only` is True, a tuple of the extracted features of
                  shape `(batch, src_len, embed_dim)` and a dictionary with
                  'inner_states' and 'encoder_embedding'
                - otherwise, a dictionary with the data2vec regression loss under
                  'losses', the 'sample_size', and auxiliary 'logs'
        """
x, extra = self.extract_features(
src_tokens, return_all_hiddens=return_all_hiddens
)
if features_only:
return x, extra
assert target_tokens is not None
with torch.no_grad():
# use EMA parameter as the teacher
self.ema.model.eval()
encoder_out = self.ema.model(
target_tokens,
return_all_hiddens=True,
)
y = encoder_out["fc_results"]
y = y[-self.average_top_k_layers :]
permuted = False
if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer:
y = [tl.permute(1, 2, 0) for tl in y] # TBC -> BCT
permuted = True
if self.cfg.batch_norm_target_layer:
y = [
F.batch_norm(
tl.float(), running_mean=None, running_var=None, training=True
)
for tl in y
]
if self.cfg.instance_norm_target_layer:
y = [F.instance_norm(tl.float()) for tl in y]
if permuted:
y = [tl.transpose(1, 2) for tl in y] # BCT -> BTC
if self.cfg.layer_norm_target_layer:
y = [F.layer_norm(tl.float(), tl.shape[-1:]) for tl in y]
y = sum(y) / len(y)
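            # the regression target is the mean of the teacher's top-K block outputs
            # (optionally normalized per layer above and again below), as in data2vec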
if not permuted:
y = y.transpose(0, 1)
if self.cfg.layer_norm_targets:
y = F.layer_norm(y.float(), y.shape[-1:])
if self.cfg.instance_norm_targets:
y = F.instance_norm(y.transpose(1, 2)).transpose(1, 2)
masked_indices = src_tokens.eq(self.mask_idx)
x = x[masked_indices]
y = y[masked_indices]
x = self.regression_head(x)
sz = x.size(-1)
if self.cfg.loss_beta == 0:
loss = F.mse_loss(x.float(), y.float(), reduction="none").sum(dim=-1)
else:
loss = F.smooth_l1_loss(
x.float(), y.float(), reduction="none", beta=self.cfg.loss_beta
).sum(dim=-1)
result = {
"losses": {
"main": loss.sum() / math.sqrt(sz)
                if self.loss_scale is None or self.loss_scale <= 0
else loss.sum() * self.loss_scale,
},
"sample_size": loss.numel(),
}
# logging other values
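        # note: the decay is logged scaled by 1000 (per-mille)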
other_logs = {
"ema_decay": self.ema.get_decay() * 1000
}
result["logs"] = other_logs
return result
def extract_features(self, src_tokens, return_all_hiddens=False, **kwargs):
encoder_out = self.sentence_encoder(
src_tokens,
return_all_hiddens=return_all_hiddens,
token_embeddings=kwargs.get("token_embeddings", None),
)
# T x B x C -> B x T x C
features = encoder_out["encoder_out"][0].transpose(0, 1)
inner_states = encoder_out["encoder_states"] if return_all_hiddens else None
return features, {
"inner_states": inner_states,
"encoder_embedding": encoder_out["encoder_embedding"][0],
}
def output_layer(self, features, masked_tokens=None, **unused):
return self.lm_head(features, masked_tokens)
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.cfg.max_positions
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/data2vec_text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# The code in this file is adapted from the BeiT implementation which can be found here:
# https://github.com/microsoft/unilm/tree/master/beit
import logging
from dataclasses import dataclass
from enum import Enum, auto
from typing import Any, Optional
import numpy as np
from omegaconf import II, MISSING
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, tasks
from omegaconf import open_dict
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from .mae import interpolate_pos_embed
logger = logging.getLogger(__name__)
class PredictionMode(Enum):
MEAN_POOLING = auto()
CLS_TOKEN = auto()
LIN_SOFTMAX = auto()
@dataclass
class MaeImageClassificationConfig(FairseqDataclass):
model_path: str = MISSING
no_pretrained_weights: bool = False
linear_classifier: bool = False
num_classes: int = 1000
mixup: float = 0.8
cutmix: float = 1.0
label_smoothing: float = 0.1
drop_path_rate: float = 0.1
layer_decay: float = 0.65
mixup_prob: float = 1.0
mixup_switch_prob: float = 0.5
mixup_mode: str = "batch"
pretrained_model_args: Any = None
data: str = II("task.data")
norm_eps: Optional[float] = None
remove_alibi: bool = False
# regularization overwrites
encoder_dropout: float = 0
post_mlp_drop: float = 0
attention_dropout: float = 0
activation_dropout: float = 0.0
dropout_input: float = 0.0
layerdrop: float = 0.0
prenet_layerdrop: float = 0
prenet_dropout: float = 0
use_fc_norm: bool = True
prediction_mode: PredictionMode = PredictionMode.MEAN_POOLING
no_decay_blocks: bool = True
def get_layer_id_for_vit(name, num_layers):
"""
Assign a parameter with its layer id
Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
"""
if name in ["cls_token", "pos_embed"]:
return 0
elif name.startswith("patch_embed"):
return 0
elif name.startswith("rel_pos_bias"):
return num_layers - 1
elif name.startswith("blocks"):
return int(name.split(".")[1]) + 1
else:
return num_layers
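# e.g. for a 12-block ViT (num_layers = 13) with the default layer_decay of 0.65:
# patch_embed/cls_token/pos_embed map to layer 0 (lr scale 0.65 ** 13), blocks.0 to
# layer 1 (0.65 ** 12), blocks.11 to layer 12 (0.65 ** 1), and the head to layer 13
# (scale 1.0).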
@register_model("mae_image_classification", dataclass=MaeImageClassificationConfig)
class MaeImageClassificationModel(BaseFairseqModel):
def __init__(self, cfg: MaeImageClassificationConfig):
super().__init__()
self.cfg = cfg
if cfg.pretrained_model_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.model_path, {})
pretrained_args = state.get("cfg", None)
pretrained_args.criterion = None
pretrained_args.lr_scheduler = None
logger.info(pretrained_args.model)
with open_dict(pretrained_args.model):
pretrained_args.model.drop_path_rate = cfg.drop_path_rate
if cfg.norm_eps is not None:
pretrained_args.model.norm_eps = cfg.norm_eps
cfg.pretrained_model_args = pretrained_args
logger.info(pretrained_args)
else:
state = None
pretrained_args = cfg.pretrained_model_args
if "data" in pretrained_args.task:
pretrained_args.task.data = cfg.data
elif "image" in pretrained_args.task:
pretrained_args.task.image.data = cfg.data
if "modalities" in pretrained_args.model:
prenet_blocks = pretrained_args.model["modalities"]["image"]["prenet_depth"]
model_blocks = pretrained_args.model["depth"]
with open_dict(pretrained_args):
dpr = np.linspace(0, cfg.drop_path_rate, model_blocks).tolist()
pretrained_args.model["modalities"]["image"][
"start_drop_path_rate"
] = dpr[0]
pretrained_args.model["modalities"]["image"][
"end_drop_path_rate"
] = max(0, dpr[prenet_blocks - 1])
pretrained_args.model["start_drop_path_rate"] = dpr[prenet_blocks]
pretrained_args.model["end_drop_path_rate"] = dpr[-1]
if "mae_masking" in pretrained_args.model["modalities"]["image"]:
del pretrained_args.model["modalities"]["image"]["mae_masking"]
if cfg.remove_alibi:
pretrained_args.model["modalities"]["image"][
"use_alibi_encoder"
] = False
if (
state is not None
and "modality_encoders.IMAGE.alibi_bias" in state["model"]
):
del state["model"]["modality_encoders.IMAGE.alibi_bias"]
pretrained_args.model["encoder_dropout"] = cfg.encoder_dropout
pretrained_args.model["post_mlp_drop"] = cfg.post_mlp_drop
pretrained_args.model["attention_dropout"] = cfg.attention_dropout
pretrained_args.model["activation_dropout"] = cfg.activation_dropout
pretrained_args.model["dropout_input"] = cfg.dropout_input
pretrained_args.model["layerdrop"] = cfg.layerdrop
pretrained_args.model["modalities"]["image"][
"prenet_layerdrop"
] = cfg.prenet_layerdrop
pretrained_args.model["modalities"]["image"][
"prenet_dropout"
] = cfg.prenet_dropout
else:
# not d2v multi
with open_dict(pretrained_args):
pretrained_args.model["drop_path_rate"] = cfg.drop_path_rate
pretrained_args.model["block_dropout"] = cfg.encoder_dropout
pretrained_args.model["attention_dropout"] = cfg.attention_dropout
pretrained_args.model["activation_dropout"] = cfg.activation_dropout
task = tasks.setup_task(pretrained_args.task)
model = task.build_model(pretrained_args.model, from_checkpoint=True)
self.d2v_multi = "data2vec_multi" in pretrained_args.model._name
self.linear_classifier = cfg.linear_classifier
self.model = model
if state is not None and not cfg.no_pretrained_weights:
interpolate_pos_embed(model, state)
if "modality_encoders.IMAGE.positional_encoder.pos_embed" in state["model"]:
state["model"][
"modality_encoders.IMAGE.positional_encoder.positions"
] = state["model"][
"modality_encoders.IMAGE.positional_encoder.pos_embed"
]
del state["model"][
"modality_encoders.IMAGE.positional_encoder.pos_embed"
]
if "modality_encoders.IMAGE.encoder_mask" in state["model"]:
del state["model"]["modality_encoders.IMAGE.encoder_mask"]
model.load_state_dict(state["model"], strict=True)
if self.d2v_multi:
model.remove_pretraining_modules(modality="image")
else:
model.remove_pretraining_modules()
if self.linear_classifier:
model.requires_grad_(False)
self.fc_norm = None
if self.cfg.use_fc_norm:
self.fc_norm = nn.LayerNorm(pretrained_args.model.embed_dim, eps=1e-6)
nn.init.constant_(self.fc_norm.bias, 0)
nn.init.constant_(self.fc_norm.weight, 1.0)
self.head = nn.Linear(pretrained_args.model.embed_dim, cfg.num_classes)
nn.init.trunc_normal_(self.head.weight, std=0.02)
nn.init.constant_(self.head.bias, 0)
self.mixup_fn = None
if cfg.mixup > 0 or cfg.cutmix > 0:
from timm.data import Mixup
self.mixup_fn = Mixup(
mixup_alpha=cfg.mixup,
cutmix_alpha=cfg.cutmix,
cutmix_minmax=None,
prob=cfg.mixup_prob,
switch_prob=cfg.mixup_switch_prob,
mode=cfg.mixup_mode,
label_smoothing=cfg.label_smoothing,
num_classes=cfg.num_classes,
)
if self.model.norm is not None:
for pn, p in self.model.norm.named_parameters():
if len(p.shape) == 1 or pn.endswith(".bias"):
p.optim_overrides = {"optimizer": {"weight_decay_scale": 0}}
if self.fc_norm is not None:
for pn, p in self.fc_norm.named_parameters():
if len(p.shape) == 1 or pn.endswith(".bias"):
p.optim_overrides = {"optimizer": {"weight_decay_scale": 0}}
for pn, p in self.head.named_parameters():
if len(p.shape) == 1 or pn.endswith(".bias"):
p.optim_overrides = {"optimizer": {"weight_decay_scale": 0}}
if self.d2v_multi:
mod_encs = list(model.modality_encoders.values())
assert len(mod_encs) == 1, len(mod_encs)
blocks = list(mod_encs[0].context_encoder.blocks) + list(model.blocks)
else:
blocks = model.blocks
num_layers = len(blocks) + 1
layer_scales = list(
cfg.layer_decay ** (num_layers - i) for i in range(num_layers + 1)
)
if self.d2v_multi:
for n, p in self.model.named_parameters():
optimizer_override_dict = {}
if len(p.shape) == 1 or n.endswith(".bias"):
optimizer_override_dict["weight_decay_scale"] = 0
p.optim_overrides = {"optimizer": optimizer_override_dict}
if cfg.layer_decay > 0:
for i, b in enumerate(blocks):
lid = i + 1
if layer_scales[lid] == 1.0:
continue
for n, p in b.named_parameters():
optim_override = getattr(p, "optim_overrides", {})
if "optimizer" not in optim_override:
optim_override["optimizer"] = {}
if cfg.no_decay_blocks:
optim_override["optimizer"]["lr_scale"] = layer_scales[lid]
p.optim_overrides = optim_override
else:
optim_override["optimizer"] = {
"lr_scale": layer_scales[lid]
}
p.optim_overrides = optim_override
else:
for n, p in self.model.named_parameters():
optimizer_override_dict = {}
layer_id = get_layer_id_for_vit(n, num_layers)
if len(p.shape) == 1 or n.endswith(".bias"):
optimizer_override_dict["weight_decay_scale"] = 0
if cfg.layer_decay > 0:
optimizer_override_dict["lr_scale"] = layer_scales[layer_id]
p.optim_overrides = {"optimizer": optimizer_override_dict}
@classmethod
def build_model(cls, cfg: MaeImageClassificationConfig, task=None):
"""Build a new model instance."""
return cls(cfg)
def forward(
self,
imgs,
labels=None,
):
if self.training and self.mixup_fn is not None and labels is not None:
imgs, labels = self.mixup_fn(imgs, labels)
if self.linear_classifier:
with torch.no_grad():
x = self.model_forward(imgs)
else:
x = self.model_forward(imgs)
if self.cfg.prediction_mode == PredictionMode.MEAN_POOLING:
x = x.mean(dim=1)
elif self.cfg.prediction_mode == PredictionMode.CLS_TOKEN:
x = x[:, 0]
elif self.cfg.prediction_mode == PredictionMode.LIN_SOFTMAX:
dtype = x.dtype
x = F.logsigmoid(x.float())
x = torch.logsumexp(x + x, dim=1) - torch.logsumexp(x + 1e-6, dim=1)
x = x.clamp(max=0)
x = x - torch.log(-(torch.expm1(x)))
x = torch.nan_to_num(x, nan=0, posinf=0, neginf=0)
x = x.to(dtype=dtype)
else:
raise Exception(f"unknown prediction mode {self.cfg.prediction_mode.name}")
if self.fc_norm is not None:
x = self.fc_norm(x)
x = self.head(x)
if labels is None:
return x
if self.training and self.mixup_fn is not None:
loss = -labels * F.log_softmax(x.float(), dim=-1)
else:
loss = F.cross_entropy(
x.float(),
labels,
label_smoothing=self.cfg.label_smoothing if self.training else 0,
reduction="none",
)
result = {
"losses": {"regression": loss},
"sample_size": imgs.size(0),
}
if not self.training:
with torch.no_grad():
pred = x.argmax(-1)
correct = (pred == labels).sum()
result["correct"] = correct
return result
def model_forward(self, imgs):
if self.d2v_multi:
x = self.model.extract_features(
imgs,
mode="IMAGE",
mask=False,
remove_extra_tokens=(
self.cfg.prediction_mode != PredictionMode.CLS_TOKEN
),
)["x"]
else:
x = self.model(imgs, predictions_only=True)
if (
"no_cls" not in self.model.cfg or not self.model.cfg.no_cls
) and not self.cfg.prediction_mode == PredictionMode.CLS_TOKEN:
x = x[:, 1:]
return x
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/mae_image_classification.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# The code in this file is adapted from the BeiT implementation which can be found here:
# https://github.com/microsoft/unilm/tree/master/beit
import logging
from dataclasses import dataclass
from functools import partial
from timm.models.vision_transformer import PatchEmbed, Block
import torch
import torch.nn as nn
import numpy as np
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec.wav2vec2 import TransformerSentenceEncoderLayer
try:
from apex.normalization import FusedLayerNorm
except ImportError:
FusedLayerNorm = nn.LayerNorm
import torch.nn.functional as F
logger = logging.getLogger(__name__)
@dataclass
class MaeConfig(FairseqDataclass):
input_size: int = 224
in_chans: int = 3
patch_size: int = 16
embed_dim: int = 768
depth: int = 12
num_heads: int = 12
decoder_embed_dim: int = 512
decoder_depth: int = 8
decoder_num_heads: int = 16
mlp_ratio: int = 4
norm_eps: float = 1e-6
drop_path_rate: float = 0.0
mask_ratio: float = 0.75
norm_pix_loss: bool = True
w2v_block: bool = False
alt_block: bool = False
alt_block2: bool = False
alt_attention: bool = False
block_dropout: float = 0
attention_dropout: float = 0
activation_dropout: float = 0
layer_norm_first: bool = False
fused_ln: bool = True
end_of_block_targets: bool = True
no_decoder_embed: bool = False
no_decoder_pos_embed: bool = False
mask_noise_std: float = 0
single_qkv: bool = False
use_rel_pos_bias: bool = False
no_cls: bool = False
def modify_relative_position_bias(orig_bias, bsz, mask):
if mask is None:
return orig_bias.unsqueeze(0).repeat(
bsz, 1, 1, 1
) # heads x seq_len x seq_len => bsz x heads x seq_len x seq_len
    heads, max_seq_len, _ = orig_bias.shape  # square (seq_len x seq_len) bias, includes CLS token
mask_for_rel_pos_bias = torch.cat(
(torch.zeros(bsz, 1, dtype=mask.dtype, device=mask.device), mask), dim=1
).bool() # bsz x seqlen (add CLS token)
unmasked_for_rel_pos_bias = ~mask_for_rel_pos_bias
unmasked_for_rel_pos_bias = unmasked_for_rel_pos_bias.unsqueeze(1).repeat(
1, heads, 1
) # bsz x seq_len => bsz x heads x seq_len
b_t_t_rel_pos_bias = orig_bias.unsqueeze(0).repeat(
bsz, 1, 1, 1
) # heads x seq_len x seq_len => bsz x heads x seq_len x seq_len
b_t_t_rel_pos_bias = b_t_t_rel_pos_bias.masked_select(
unmasked_for_rel_pos_bias.unsqueeze(-1)
)
b_t_t_rel_pos_bias = b_t_t_rel_pos_bias.view(bsz, heads, -1, max_seq_len)
new_len = b_t_t_rel_pos_bias.size(-2)
b_t_t_rel_pos_bias = b_t_t_rel_pos_bias.masked_select(
unmasked_for_rel_pos_bias.unsqueeze(-2)
)
b_t_t_rel_pos_bias = b_t_t_rel_pos_bias.view(bsz, heads, new_len, new_len)
return b_t_t_rel_pos_bias
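# i.e. the shared (heads x seq_len x seq_len) bias is expanded per sample and the rows
# and columns belonging to masked-out patches are dropped, yielding a
# (bsz x heads x kept x kept) bias over the visible tokens plus CLS.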
class AltBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
layer_norm_first=True,
ffn_targets=False,
use_rel_pos_bias=False,
window_size=None,
alt_attention=False,
):
super().__init__()
self.layer_norm_first = layer_norm_first
self.ffn_targets = ffn_targets
from timm.models.vision_transformer import Attention, DropPath, Mlp
self.norm1 = norm_layer(dim)
self.use_rel_pos_bias = use_rel_pos_bias
if use_rel_pos_bias:
self.attn = AltAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
window_size=window_size,
)
else:
if alt_attention:
from .multi.modules import AltAttention as AltAttention2
self.attn = AltAttention2(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
else:
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
def forward(self, x, rel_pos_bias=None, pos_mask=None):
if self.layer_norm_first:
if self.use_rel_pos_bias:
x = x + self.drop_path(
self.attn(
self.norm1(x), rel_pos_bias=rel_pos_bias, pos_mask=pos_mask
)
)
else:
x = x + self.drop_path(self.attn(self.norm1(x)))
t = self.mlp(self.norm2(x))
x = x + self.drop_path(t)
if not self.ffn_targets:
t = x
return x, t
else:
if self.use_rel_pos_bias:
x = x + self.drop_path(
self.attn(x, rel_pos_bias=rel_pos_bias, pos_mask=pos_mask)
)
else:
x = x + self.drop_path(self.attn(x))
r = x = self.norm1(x)
x = self.mlp(x)
t = x
x = self.norm2(r + self.drop_path(x))
if not self.ffn_targets:
t = x
return x, t
class AltAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
window_size=None,
attn_head_dim=None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (
2 * window_size[1] - 1
) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = (
coords_flatten[:, :, None] - coords_flatten[:, None, :]
) # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(
1, 2, 0
).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(
size=(window_size[0] * window_size[1] + 1,) * 2,
dtype=relative_coords.dtype,
)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None, pos_mask=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat(
(
self.q_bias,
torch.zeros_like(self.v_bias, requires_grad=False),
self.v_bias,
)
)
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if self.relative_position_bias_table is not None:
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1,
-1,
) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1
).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + modify_relative_position_bias(
relative_position_bias, x.size(0), pos_mask
)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (
2 * window_size[1] - 1
) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)
)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = (
coords_flatten[:, :, None] - coords_flatten[:, None, :]
) # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(
1, 2, 0
).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(
size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
def forward(self):
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1,
-1,
) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size, dtype=np.float32)
grid_w = np.arange(grid_size, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size, grid_size])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed
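# e.g. get_2d_sincos_pos_embed(768, 14, cls_token=True) returns a (1 + 14 * 14, 768)
# array: an all-zero row for the CLS token followed by 196 patch position embeddings.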
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
assert embed_dim % 2 == 0
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
omega /= embed_dim / 2.0
omega = 1.0 / 10000 ** omega # (D/2,)
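    # geometric frequency ladder from 1 down to 1/10000, as in the original
    # Transformer sinusoidal position encoding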
pos = pos.reshape(-1) # (M,)
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
def interpolate_pos_embed(model, checkpoint_model):
if "pos_embed" in checkpoint_model:
pos_embed_checkpoint = checkpoint_model["pos_embed"]
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print(
"Position interpolate from %dx%d to %dx%d"
% (orig_size, orig_size, new_size, new_size)
)
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(
-1, orig_size, orig_size, embedding_size
).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode="bicubic",
align_corners=False,
)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model["pos_embed"] = new_pos_embed
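# Used when fine-tuning at a resolution different from pre-training: the patch grid of
# position embeddings is resized bicubically while the extra (e.g. CLS) token
# embeddings are carried over unchanged.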
@register_model("mae", dataclass=MaeConfig)
class MaeModel(BaseFairseqModel):
def __init__(self, cfg: MaeConfig):
super().__init__()
self.cfg = cfg
self.mask_ratio = cfg.mask_ratio
# --------------------------------------------------------------------------
# MAE encoder specifics
self.patch_embed = PatchEmbed(
cfg.input_size, cfg.patch_size, cfg.in_chans, cfg.embed_dim
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, cfg.embed_dim)) if not cfg.no_cls else None
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + int(not cfg.no_cls), cfg.embed_dim), requires_grad=False
) # fixed sin-cos embedding
norm_layer = partial(nn.LayerNorm, eps=cfg.norm_eps)
dpr = [
x.item() for x in torch.linspace(0, cfg.drop_path_rate, cfg.depth)
] # stochastic depth decay rule
def make_block(drop_path):
if cfg.w2v_block:
return TransformerSentenceEncoderLayer(
embedding_dim=cfg.embed_dim,
ffn_embedding_dim=cfg.embed_dim * cfg.mlp_ratio,
num_attention_heads=cfg.num_heads,
dropout=cfg.block_dropout,
attention_dropout=cfg.attention_dropout,
activation_dropout=cfg.activation_dropout,
activation_fn="gelu",
layer_norm_first=cfg.layer_norm_first,
drop_path=drop_path,
norm_eps=1e-6,
single_qkv=cfg.single_qkv,
fused_ln=cfg.fused_ln,
)
elif cfg.alt_block:
window_size = (
cfg.input_size // self.patch_embed.patch_size[0],
cfg.input_size // self.patch_embed.patch_size[1],
)
return AltBlock(
cfg.embed_dim,
cfg.num_heads,
cfg.mlp_ratio,
qkv_bias=True,
qk_scale=None,
norm_layer=norm_layer,
drop_path=drop_path,
layer_norm_first=cfg.layer_norm_first,
ffn_targets=not cfg.end_of_block_targets,
use_rel_pos_bias=cfg.use_rel_pos_bias,
window_size=window_size
if (self.cfg.use_rel_pos_bias and not self.cfg.shared_rel_pos_bias)
else None,
alt_attention=cfg.alt_attention,
)
elif cfg.alt_block2:
from .multi.modules import AltBlock as AltBlock2
return AltBlock2(
cfg.embed_dim,
cfg.num_heads,
cfg.mlp_ratio,
qkv_bias=True,
qk_scale=None,
norm_layer=norm_layer,
drop_path=drop_path,
layer_norm_first=cfg.layer_norm_first,
ffn_targets=not cfg.end_of_block_targets,
)
else:
return Block(
cfg.embed_dim,
cfg.num_heads,
cfg.mlp_ratio,
qkv_bias=True,
qk_scale=None,
norm_layer=norm_layer,
drop_path=drop_path,
)
self.blocks = nn.ModuleList([make_block(dpr[i]) for i in range(cfg.depth)])
self.norm = norm_layer(cfg.embed_dim)
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# MAE decoder specifics
self.decoder_embed = (
nn.Linear(cfg.embed_dim, cfg.decoder_embed_dim, bias=True)
if not cfg.no_decoder_embed
else None
)
self.mask_token = (
nn.Parameter(
torch.zeros(
1,
1,
cfg.decoder_embed_dim
if not cfg.no_decoder_embed
else cfg.embed_dim,
)
)
if cfg.mask_noise_std <= 0
else None
)
self.decoder_pos_embed = (
nn.Parameter(
torch.zeros(
1,
num_patches + 1,
cfg.decoder_embed_dim
if not cfg.no_decoder_embed
else cfg.embed_dim,
),
requires_grad=False,
)
if not cfg.no_decoder_pos_embed
else None
)
self.decoder_blocks = nn.ModuleList(
[
Block(
cfg.decoder_embed_dim,
cfg.decoder_num_heads,
cfg.mlp_ratio,
qkv_bias=True,
qk_scale=None,
norm_layer=norm_layer,
)
for _ in range(cfg.decoder_depth)
]
)
self.decoder_norm = norm_layer(cfg.decoder_embed_dim)
self.decoder_pred = nn.Linear(
cfg.decoder_embed_dim, cfg.patch_size ** 2 * cfg.in_chans, bias=True
) # decoder to patch
# --------------------------------------------------------------------------
self.norm_pix_loss = cfg.norm_pix_loss
self.initialize_weights()
for pn, p in self.named_parameters():
if len(p.shape) == 1 or pn.endswith(".bias"):
p.param_group = "no_decay"
else:
p.param_group = "with_decay"
def initialize_weights(self):
# initialization
# initialize (and freeze) pos_embed by sin-cos embedding
pos_embed = get_2d_sincos_pos_embed(
self.pos_embed.shape[-1],
int(self.patch_embed.num_patches ** 0.5),
cls_token=not self.cfg.no_cls,
)
self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
if self.decoder_pos_embed is not None:
decoder_pos_embed = get_2d_sincos_pos_embed(
self.decoder_pos_embed.shape[-1],
int(self.patch_embed.num_patches ** 0.5),
cls_token=not self.cfg.no_cls,
)
self.decoder_pos_embed.data.copy_(
torch.from_numpy(decoder_pos_embed).float().unsqueeze(0)
)
# initialize patch_embed like nn.Linear (instead of nn.Conv2d)
w = self.patch_embed.proj.weight.data
torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
# timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
if self.cls_token is not None:
torch.nn.init.normal_(self.cls_token, std=0.02)
if self.mask_token is not None:
torch.nn.init.normal_(self.mask_token, std=0.02)
# initialize nn.Linear and nn.LayerNorm
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
# we use xavier_uniform following official JAX ViT:
torch.nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm) or isinstance(m, FusedLayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def patchify(self, imgs):
"""
imgs: (N, 3, H, W)
x: (N, L, patch_size**2 *3)
"""
p = self.patch_embed.patch_size[0]
assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
h = w = imgs.shape[2] // p
x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
x = torch.einsum("nchpwq->nhwpqc", x)
x = x.reshape(shape=(imgs.shape[0], h * w, p ** 2 * 3))
return x
def unpatchify(self, x):
"""
x: (N, L, patch_size**2 *3)
imgs: (N, 3, H, W)
"""
p = self.patch_embed.patch_size[0]
h = w = int(x.shape[1] ** 0.5)
assert h * w == x.shape[1]
x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
x = torch.einsum("nhwpqc->nchpwq", x)
imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
return imgs
def random_masking(self, x, mask_ratio):
"""
Perform per-sample random masking by per-sample shuffling.
Per-sample shuffling is done by argsort random noise.
x: [N, L, D], sequence
"""
N, L, D = x.shape # batch, length, dim
len_keep = int(L * (1 - mask_ratio))
noise = torch.rand(N, L, device=x.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = torch.argsort(
noise, dim=1
) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1)
# keep the first subset
ids_keep = ids_shuffle[:, :len_keep]
x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
# generate the binary mask: 0 is keep, 1 is remove
mask = torch.ones([N, L], device=x.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
        return x_masked, mask, ids_restore  # x_masked contains only the kept (visible) tokens
@classmethod
def build_model(cls, cfg: MaeConfig, task=None):
"""Build a new model instance."""
return cls(cfg)
def forward_encoder(self, x, mask_ratio):
# embed patches
x = self.patch_embed(x)
# add pos embed w/o cls token
# if self.cls_token is not None:
# x = x + self.pos_embed
# else:
x = x + self.pos_embed[:, 1:, :]
# masking: length -> length * mask_ratio
if mask_ratio > 0:
x, mask, ids_restore = self.random_masking(x, mask_ratio)
else:
mask = ids_restore = None
# append cls token
if self.cls_token is not None:
cls_token = self.cls_token + self.pos_embed[:, :1, :]
cls_tokens = cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# apply Transformer blocks
for blk in self.blocks:
x = blk(x)
if self.norm is not None:
x = self.norm(x)
return x, mask, ids_restore
def forward_decoder(self, x, ids_restore):
# embed tokens
x = self.decoder_embed(x)
# append mask tokens to sequence
mask_tokens = self.mask_token.repeat(
x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1
)
if self.cls_token is not None:
x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) # no cls token
else:
x_ = torch.cat([x, mask_tokens], dim=1) # no cls token
x_ = torch.gather(
x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])
) # unshuffle
if self.cls_token is not None:
x = torch.cat([x[:, :1, :], x_], dim=1) # append cls token
# add pos embed
x = x + self.decoder_pos_embed
# apply Transformer blocks
for blk in self.decoder_blocks:
x = blk(x)
x = self.decoder_norm(x)
# predictor projection
x = self.decoder_pred(x)
if self.cls_token is not None:
# remove cls token
x = x[:, 1:, :]
return x
def forward_loss(self, imgs, pred, mask):
"""
imgs: [N, 3, H, W]
pred: [N, L, p*p*3]
mask: [N, L], 0 is keep, 1 is remove,
"""
target = self.patchify(imgs)
if self.norm_pix_loss:
mean = target.mean(dim=-1, keepdim=True)
var = target.var(dim=-1, keepdim=True)
target = (target - mean) / (var + 1.0e-6) ** 0.5
loss = (pred - target) ** 2
loss = loss.mean(dim=-1) # [N, L], mean loss per patch
loss = (loss * mask).sum()
return loss, mask.sum()
def forward(self, imgs, predictions_only=False):
latent, mask, ids_restore = self.forward_encoder(
imgs, self.mask_ratio if not predictions_only else 0
)
if predictions_only:
return latent
pred = self.forward_decoder(latent, ids_restore) # [N, L, p*p*3]
loss, sample_size = self.forward_loss(imgs, pred, mask)
result = {
"losses": {"regression": loss},
"sample_size": sample_size,
}
return result
def remove_pretraining_modules(self):
self.decoder_embed = None
self.decoder_blocks = None
self.decoder_norm = None
self.decoder_pos_embed = None
self.decoder_pred = None
self.mask_token = None
if self.cfg.layer_norm_first:
self.norm = None
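# Illustrative usage sketch (not part of the upstream fairseq module). It exercises
# the patchify/unpatchify round trip and random_masking defined above on a tiny
# hypothetical batch; none of the sizes below are training defaults, and the dummy
# `fake_self` only carries the patch size that patchify/unpatchify actually read.
def _example_mae_helpers():
    from types import SimpleNamespace
    p = 16
    fake_self = SimpleNamespace(patch_embed=SimpleNamespace(patch_size=(p, p)))
    imgs = torch.randn(2, 3, 64, 64)
    patches = MaeModel.patchify(fake_self, imgs)  # (N, L, p*p*3)
    assert patches.shape == (2, (64 // p) ** 2, p * p * 3)
    assert torch.equal(MaeModel.unpatchify(fake_self, patches), imgs)  # exact inverse
    # random_masking keeps a (1 - mask_ratio) fraction of patches per sample and
    # returns the indices needed to undo the shuffle in the decoder
    x_masked, mask, ids_restore = MaeModel.random_masking(None, patches, mask_ratio=0.75)
    assert x_masked.shape == (2, 4, p * p * 3)  # 25% of 16 patches kept
    assert mask.shape == ids_restore.shape == (2, 16)
    assert torch.all(mask.sum(dim=1) == 12)  # 12 of 16 patches masked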
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/mae.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass, field
from typing import Optional, Callable
from functools import partial
import numpy as np
from omegaconf import II
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from fairseq.modules import EMAModule, EMAModuleConfig
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from examples.data2vec.data.modality import Modality
from examples.data2vec.models.modalities.base import (
MaskSeed,
D2vModalityConfig,
ModalitySpecificEncoder,
get_annealed_rate,
)
from examples.data2vec.models.modalities.modules import (
D2vDecoderConfig,
AltBlock,
Decoder1d,
)
from examples.data2vec.models.modalities.audio import (
D2vAudioConfig,
AudioEncoder,
)
from examples.data2vec.models.modalities.images import (
D2vImageConfig,
ImageEncoder,
)
from examples.data2vec.models.modalities.text import (
D2vTextConfig,
TextEncoder,
)
logger = logging.getLogger(__name__)
@dataclass
class D2vModalitiesConfig(FairseqDataclass):
audio: D2vAudioConfig = D2vAudioConfig()
image: D2vImageConfig = D2vImageConfig()
text: D2vTextConfig = D2vTextConfig()
@dataclass
class Data2VecMultiConfig(FairseqDataclass):
loss_beta: float = field(
default=0, metadata={"help": "beta for smooth l1 loss. 0 means use l2 loss"}
)
loss_scale: Optional[float] = field(
default=None,
metadata={
"help": "scale the reconstruction loss by this constant. if None then scales by 1/sqrt(dim)"
},
)
depth: int = 8
start_drop_path_rate: float = 0
end_drop_path_rate: float = 0
num_heads: int = 12
norm_eps: float = 1e-6
norm_affine: bool = True
encoder_dropout: float = 0.1
post_mlp_drop: float = 0.1
attention_dropout: float = 0.1
activation_dropout: float = 0.0
dropout_input: float = 0.0
layerdrop: float = 0.0
embed_dim: int = 768
mlp_ratio: float = 4
layer_norm_first: bool = False
average_top_k_layers: int = field(
default=8, metadata={"help": "how many layers to average"}
)
end_of_block_targets: bool = False
clone_batch: int = 1
layer_norm_target_layer: bool = False
batch_norm_target_layer: bool = False
instance_norm_target_layer: bool = False
instance_norm_targets: bool = False
layer_norm_targets: bool = False
ema_decay: float = field(default=0.999, metadata={"help": "initial ema decay rate"})
ema_same_dtype: bool = True
log_norms: bool = True
ema_end_decay: float = field(
default=0.9999, metadata={"help": "final ema decay rate"}
)
# when to finish annealing ema decay rate
ema_anneal_end_step: int = II("optimization.max_update")
ema_encoder_only: bool = field(
default=True,
metadata={
"help": "whether to momentum update only the shared transformer encoder"
},
)
max_update: int = II("optimization.max_update")
modalities: D2vModalitiesConfig = D2vModalitiesConfig()
shared_decoder: Optional[D2vDecoderConfig] = None
min_target_var: float = field(
default=0.1, metadata={"help": "stop training if target var falls below this"}
)
min_pred_var: float = field(
default=0.01,
metadata={"help": "stop training if prediction var falls below this"},
)
supported_modality: Optional[Modality] = None
mae_init: bool = False
seed: int = II("common.seed")
skip_ema: bool = False
cls_loss: float = 0
recon_loss: float = 0
d2v_loss: float = 1
decoder_group: bool = False
@register_model("data2vec_multi", dataclass=Data2VecMultiConfig)
class Data2VecMultiModel(BaseFairseqModel):
def make_modality_encoder(
self,
cfg: D2vModalityConfig,
embed_dim: int,
make_block: Callable[[float], nn.ModuleList],
norm_layer: Callable[[int], nn.LayerNorm],
layer_norm_first: bool,
alibi_biases,
task,
) -> ModalitySpecificEncoder:
if cfg.type == Modality.AUDIO:
enc_cls = AudioEncoder
elif cfg.type == Modality.IMAGE:
enc_cls = ImageEncoder
elif cfg.type == Modality.TEXT:
enc_cls = TextEncoder
if hasattr(task, "text_task"):
task = task.text_task
else:
raise Exception(f"unsupported modality {cfg.type}")
return enc_cls(
cfg,
embed_dim,
make_block,
norm_layer,
layer_norm_first,
alibi_biases,
task,
)
def __init__(self, cfg: Data2VecMultiConfig, modalities, skip_ema=False, task=None):
super().__init__()
self.cfg = cfg
self.modalities = modalities
self.task = task
make_layer_norm = partial(
nn.LayerNorm, eps=cfg.norm_eps, elementwise_affine=cfg.norm_affine
)
def make_block(drop_path, dim=None, heads=None):
return AltBlock(
cfg.embed_dim if dim is None else dim,
cfg.num_heads if heads is None else heads,
cfg.mlp_ratio,
qkv_bias=True,
drop=cfg.encoder_dropout,
attn_drop=cfg.attention_dropout,
mlp_drop=cfg.activation_dropout,
post_mlp_drop=cfg.post_mlp_drop,
drop_path=drop_path,
norm_layer=make_layer_norm,
layer_norm_first=cfg.layer_norm_first,
ffn_targets=not cfg.end_of_block_targets,
)
self.alibi_biases = {}
self.modality_encoders = nn.ModuleDict()
for mod in self.modalities:
mod_cfg = getattr(cfg.modalities, mod.name.lower())
enc = self.make_modality_encoder(
mod_cfg,
cfg.embed_dim,
make_block,
make_layer_norm,
cfg.layer_norm_first,
self.alibi_biases,
task,
)
self.modality_encoders[mod.name] = enc
self.ema = None
self.average_top_k_layers = cfg.average_top_k_layers
self.loss_beta = cfg.loss_beta
self.loss_scale = cfg.loss_scale
self.dropout_input = nn.Dropout(cfg.dropout_input)
dpr = np.linspace(cfg.start_drop_path_rate, cfg.end_drop_path_rate, cfg.depth)
self.blocks = nn.ModuleList([make_block(dpr[i]) for i in range(cfg.depth)])
self.norm = None
if cfg.layer_norm_first:
self.norm = make_layer_norm(cfg.embed_dim)
if self.cfg.mae_init:
self.apply(self._init_weights)
else:
from fairseq.modules.transformer_sentence_encoder import init_bert_params
self.apply(init_bert_params)
for mod_enc in self.modality_encoders.values():
mod_enc.reset_parameters()
if not skip_ema:
self.ema = self.make_ema_teacher(cfg.ema_decay)
self.shared_decoder = (
Decoder1d(cfg.shared_decoder, cfg.embed_dim)
if self.cfg.shared_decoder is not None
else None
)
if self.shared_decoder is not None:
self.shared_decoder.apply(self._init_weights)
self.recon_proj = None
if cfg.recon_loss > 0:
self.recon_proj = nn.Linear(cfg.embed_dim, cfg.embed_dim)
for pn, p in self.named_parameters():
if len(p.shape) == 1 or pn.endswith(".bias") or "alibi_scale" in pn:
p.optim_overrides = {"optimizer": {"weight_decay_scale": 0}}
if cfg.decoder_group and "decoder" in pn:
p.param_group = "decoder"
self.num_updates = 0
def _init_weights(self, m):
try:
from apex.normalization import FusedLayerNorm
fn = FusedLayerNorm
except:
fn = nn.LayerNorm
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm) or isinstance(m, fn):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if m.weight is not None:
nn.init.constant_(m.weight, 1.0)
@torch.no_grad()
def make_ema_teacher(self, ema_decay):
ema_config = EMAModuleConfig(
ema_decay=ema_decay,
ema_fp32=True,
log_norms=self.cfg.log_norms,
add_missing_params=False,
)
model_copy = self.make_target_model()
return EMAModule(
model_copy,
ema_config,
copy_model=False,
)
def make_target_model(self):
logger.info("making target model")
model_copy = Data2VecMultiModel(
self.cfg, self.modalities, skip_ema=True, task=self.task
)
if self.cfg.ema_encoder_only:
model_copy = model_copy.blocks
for p_s, p_t in zip(self.blocks.parameters(), model_copy.parameters()):
p_t.data.copy_(p_s.data)
else:
for p_s, p_t in zip(self.parameters(), model_copy.parameters()):
p_t.data.copy_(p_s.data)
for mod_enc in model_copy.modality_encoders.values():
mod_enc.decoder = None
if not mod_enc.modality_cfg.ema_local_encoder:
mod_enc.local_encoder = None
mod_enc.project_features = None
model_copy.requires_grad_(False)
return model_copy
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
if self.ema is not None and (
(self.num_updates == 0 and num_updates > 1)
or self.num_updates >= num_updates
):
pass
elif self.training and self.ema is not None:
ema_weight_decay = None
if self.cfg.ema_decay != self.cfg.ema_end_decay:
if num_updates >= self.cfg.ema_anneal_end_step:
decay = self.cfg.ema_end_decay
else:
decay = get_annealed_rate(
self.cfg.ema_decay,
self.cfg.ema_end_decay,
num_updates,
self.cfg.ema_anneal_end_step,
)
self.ema.set_decay(decay, weight_decay=ema_weight_decay)
if self.ema.get_decay() < 1:
self.ema.step(self.blocks if self.cfg.ema_encoder_only else self)
self.num_updates = num_updates
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = super().state_dict(destination, prefix, keep_vars)
if self.ema is not None:
state[prefix + "_ema"] = self.ema.fp32_params
return state
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
k = prefix + "_ema"
if self.ema is not None:
assert k in state_dict
self.ema.restore(state_dict[k], True)
del state_dict[k]
elif k in state_dict:
del state_dict[k]
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
@classmethod
def build_model(cls, cfg: Data2VecMultiConfig, task=None):
"""Build a new model instance."""
if task is None or not hasattr(task, "supported_modalities"):
modalities = (
[cfg.supported_modality]
if cfg.supported_modality is not None
else [
Modality.AUDIO,
Modality.IMAGE,
Modality.TEXT,
]
)
else:
modalities = task.supported_modalities
return cls(cfg, modalities, task=task, skip_ema=cfg.skip_ema)
def forward(
self,
source,
target=None,
id=None,
mode=None,
padding_mask=None,
mask=True,
features_only=False,
force_remove_masked=False,
remove_extra_tokens=True,
precomputed_mask=None,
):
if mode is None:
assert self.cfg.supported_modality is not None
mode = self.cfg.supported_modality
if isinstance(mode, Modality):
mode = mode.name
feature_extractor = self.modality_encoders[mode]
mask_seeds = None
if id is not None:
mask_seeds = MaskSeed(seed=self.cfg.seed, update=self.num_updates, ids=id)
extractor_out = feature_extractor(
source,
padding_mask,
mask,
remove_masked=not features_only or force_remove_masked,
clone_batch=self.cfg.clone_batch if not features_only else 1,
mask_seeds=mask_seeds,
precomputed_mask=precomputed_mask,
)
x = extractor_out["x"]
encoder_mask = extractor_out["encoder_mask"]
masked_padding_mask = extractor_out["padding_mask"]
masked_alibi_bias = extractor_out.get("alibi_bias", None)
alibi_scale = extractor_out.get("alibi_scale", None)
if self.dropout_input is not None:
x = self.dropout_input(x)
layer_results = []
for i, blk in enumerate(self.blocks):
if (
not self.training
or self.cfg.layerdrop == 0
or (np.random.random() > self.cfg.layerdrop)
):
ab = masked_alibi_bias
if ab is not None and alibi_scale is not None:
scale = (
alibi_scale[i]
if alibi_scale.size(0) > 1
else alibi_scale.squeeze(0)
)
ab = ab * scale.type_as(ab)
x, lr = blk(
x,
padding_mask=masked_padding_mask,
alibi_bias=ab,
)
if features_only:
layer_results.append(lr)
if self.norm is not None:
x = self.norm(x)
if features_only:
if remove_extra_tokens:
x = x[:, feature_extractor.modality_cfg.num_extra_tokens :]
if masked_padding_mask is not None:
masked_padding_mask = masked_padding_mask[
:, feature_extractor.modality_cfg.num_extra_tokens :
]
return {
"x": x,
"padding_mask": masked_padding_mask,
"layer_results": layer_results,
"mask": encoder_mask,
}
xs = []
if self.shared_decoder is not None:
dx = self.forward_decoder(
x,
feature_extractor,
self.shared_decoder,
encoder_mask,
)
xs.append(dx)
if feature_extractor.decoder is not None:
dx = self.forward_decoder(
x,
feature_extractor,
feature_extractor.decoder,
encoder_mask,
)
xs.append(dx)
orig_x = x
assert len(xs) > 0
p = next(self.ema.model.parameters())
device = x.device
dtype = x.dtype
ema_device = p.device
ema_dtype = p.dtype
if not self.cfg.ema_same_dtype:
dtype = ema_dtype
if ema_device != device or ema_dtype != dtype:
logger.info(f"adjusting ema dtype to {dtype} and device to {device}")
self.ema.model = self.ema.model.to(dtype=dtype, device=device)
ema_dtype = dtype
def to_device(d):
for k, p in d.items():
if isinstance(d[k], dict):
to_device(d[k])
else:
d[k] = p.to(device=device)
to_device(self.ema.fp32_params)
tm = self.ema.model
with torch.no_grad():
tm.eval()
if self.cfg.ema_encoder_only:
assert target is None
ema_input = extractor_out["local_features"]
ema_input = feature_extractor.contextualized_features(
ema_input.to(dtype=ema_dtype),
padding_mask,
mask=False,
remove_masked=False,
)
ema_blocks = tm
else:
ema_blocks = tm.blocks
if feature_extractor.modality_cfg.ema_local_encoder:
inp = (
target.to(dtype=ema_dtype)
if target is not None
else source.to(dtype=ema_dtype)
)
ema_input = tm.modality_encoders[mode](
inp,
padding_mask,
mask=False,
remove_masked=False,
)
else:
assert target is None
ema_input = extractor_out["local_features"]
ema_feature_enc = tm.modality_encoders[mode]
ema_input = ema_feature_enc.contextualized_features(
ema_input.to(dtype=ema_dtype),
padding_mask,
mask=False,
remove_masked=False,
)
ema_padding_mask = ema_input["padding_mask"]
ema_alibi_bias = ema_input.get("alibi_bias", None)
ema_alibi_scale = ema_input.get("alibi_scale", None)
ema_input = ema_input["x"]
y = []
ema_x = []
extra_tokens = feature_extractor.modality_cfg.num_extra_tokens
for i, blk in enumerate(ema_blocks):
ab = ema_alibi_bias
if ab is not None and alibi_scale is not None:
scale = (
ema_alibi_scale[i]
if ema_alibi_scale.size(0) > 1
else ema_alibi_scale.squeeze(0)
)
ab = ab * scale.type_as(ab)
ema_input, lr = blk(
ema_input,
padding_mask=ema_padding_mask,
alibi_bias=ab,
)
y.append(lr[:, extra_tokens:])
ema_x.append(ema_input[:, extra_tokens:])
y = self.make_targets(y, self.average_top_k_layers)
orig_targets = y
if self.cfg.clone_batch > 1:
y = y.repeat_interleave(self.cfg.clone_batch, 0)
masked = encoder_mask.mask.unsqueeze(-1)
masked_b = encoder_mask.mask.bool()
y = y[masked_b]
if xs[0].size(1) == masked_b.size(1):
xs = [x[masked_b] for x in xs]
else:
xs = [x.reshape(-1, x.size(-1)) for x in xs]
sample_size = masked.sum().long()
result = {
"losses": {},
"sample_size": sample_size,
}
sample_size = result["sample_size"]
if self.cfg.cls_loss > 0:
assert extra_tokens > 0
cls_target = orig_targets.mean(dim=1)
if self.cfg.clone_batch > 1:
cls_target = cls_target.repeat_interleave(self.cfg.clone_batch, 0)
cls_pred = x[:, extra_tokens - 1]
result["losses"]["cls"] = self.d2v_loss(cls_pred, cls_target) * (
self.cfg.cls_loss * sample_size
)
if self.cfg.recon_loss > 0:
with torch.no_grad():
target = feature_extractor.patchify(source)
mean = target.mean(dim=-1, keepdim=True)
var = target.var(dim=-1, keepdim=True)
target = (target - mean) / (var + 1.0e-6) ** 0.5
if self.cfg.clone_batch > 1:
target = target.repeat_interleave(self.cfg.clone_batch, 0)
if masked_b is not None:
target = target[masked_b]
recon = xs[0]
if self.recon_proj is not None:
recon = self.recon_proj(recon)
result["losses"]["recon"] = (
self.d2v_loss(recon, target.float()) * self.cfg.recon_loss
)
if self.cfg.d2v_loss > 0:
for i, x in enumerate(xs):
reg_loss = self.d2v_loss(x, y)
n = f"{mode}_regression_{i}" if len(xs) > 1 else f"{mode}_regression"
result["losses"][n] = reg_loss * self.cfg.d2v_loss
suffix = "" if len(self.modalities) == 1 else f"_{mode}"
with torch.no_grad():
if encoder_mask is not None:
result["masked_pct"] = 1 - (
encoder_mask.ids_keep.size(1) / encoder_mask.ids_restore.size(1)
)
for i, x in enumerate(xs):
n = f"pred_var{suffix}_{i}" if len(xs) > 1 else f"pred_var{suffix}"
result[n] = self.compute_var(x.float())
if self.ema is not None:
for k, v in self.ema.logs.items():
result[k] = v
y = y.float()
result[f"target_var{suffix}"] = self.compute_var(y)
if self.num_updates > 5000:
if result[f"target_var{suffix}"] < self.cfg.min_target_var:
logger.error(
f"target var is {result[f'target_var{suffix}'].item()} < {self.cfg.min_target_var}, exiting ({mode})"
)
raise Exception(
f"target var is {result[f'target_var{suffix}'].item()} < {self.cfg.min_target_var}, exiting ({mode})"
)
for k in result.keys():
if k.startswith("pred_var") and result[k] < self.cfg.min_pred_var:
logger.error(
f"{k} is {result[k].item()} < {self.cfg.min_pred_var}, exiting ({mode})"
)
raise Exception(
f"{k} is {result[k].item()} < {self.cfg.min_pred_var}, exiting ({mode})"
)
result["ema_decay"] = self.ema.get_decay() * 1000
return result
def forward_decoder(
self,
x,
feature_extractor,
decoder,
mask_info,
):
x = feature_extractor.decoder_input(x, mask_info)
x = decoder(*x)
return x
def d2v_loss(self, x, y):
x = x.view(-1, x.size(-1)).float()
y = y.view(-1, x.size(-1))
if self.loss_beta == 0:
loss = F.mse_loss(x, y, reduction="none")
else:
loss = F.smooth_l1_loss(x, y, reduction="none", beta=self.loss_beta)
if self.loss_scale is not None:
scale = self.loss_scale
else:
scale = 1 / math.sqrt(x.size(-1))
reg_loss = loss * scale
return reg_loss
def make_targets(self, y, num_layers):
with torch.no_grad():
target_layer_results = y[-num_layers:]
permuted = False
if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer:
target_layer_results = [
tl.transpose(1, 2) for tl in target_layer_results # BTC -> BCT
]
permuted = True
if self.cfg.batch_norm_target_layer:
target_layer_results = [
F.batch_norm(
tl.float(), running_mean=None, running_var=None, training=True
)
for tl in target_layer_results
]
if self.cfg.instance_norm_target_layer:
target_layer_results = [
F.instance_norm(tl.float()) for tl in target_layer_results
]
if permuted:
target_layer_results = [
tl.transpose(1, 2) for tl in target_layer_results # BCT -> BTC
]
if self.cfg.layer_norm_target_layer:
target_layer_results = [
F.layer_norm(tl.float(), tl.shape[-1:])
for tl in target_layer_results
]
y = target_layer_results[0].float()
for tl in target_layer_results[1:]:
y.add_(tl.float())
y = y.div_(len(target_layer_results))
if self.cfg.layer_norm_targets:
y = F.layer_norm(y, y.shape[-1:])
if self.cfg.instance_norm_targets:
y = F.instance_norm(y.transpose(1, 2)).transpose(1, 2)
return y
@staticmethod
def compute_var(y):
y = y.view(-1, y.size(-1))
if dist.is_initialized():
zc = torch.tensor(y.size(0)).cuda()
zs = y.sum(dim=0)
zss = (y**2).sum(dim=0)
dist.all_reduce(zc)
dist.all_reduce(zs)
dist.all_reduce(zss)
var = zss / (zc - 1) - (zs**2) / (zc * (zc - 1))
return torch.sqrt(var + 1e-6).mean()
else:
return torch.sqrt(y.var(dim=0) + 1e-6).mean()
def extract_features(
self, source, mode=None, padding_mask=None, mask=False, remove_extra_tokens=True
):
res = self.forward(
source,
mode=mode,
padding_mask=padding_mask,
mask=mask,
features_only=True,
remove_extra_tokens=remove_extra_tokens,
)
return res
def remove_pretraining_modules(self, modality=None, keep_decoder=False):
self.ema = None
self.cfg.clone_batch = 1
self.recon_proj = None
if not keep_decoder:
self.shared_decoder = None
modality = modality.lower() if modality is not None else None
for k in list(self.modality_encoders.keys()):
if modality is not None and k.lower() != modality:
del self.modality_encoders[k]
else:
self.modality_encoders[k].remove_pretraining_modules(
keep_decoder=keep_decoder
)
if not keep_decoder:
self.modality_encoders[k].decoder = None
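# Illustrative usage sketch (not part of the upstream fairseq module). It shows how
# the teacher targets and the regression loss defined above behave on random tensors;
# the config flags and sizes below are hypothetical, not the shipped training defaults.
def _example_targets_and_loss():
    from types import SimpleNamespace
    fake_cfg = SimpleNamespace(
        instance_norm_target_layer=True,
        batch_norm_target_layer=False,
        layer_norm_target_layer=False,
        layer_norm_targets=False,
        instance_norm_targets=False,
    )
    fake_self = SimpleNamespace(cfg=fake_cfg, loss_beta=0, loss_scale=None)
    B, T, C, n_layers = 2, 5, 8, 4
    layer_outputs = [torch.randn(B, T, C) for _ in range(n_layers)]
    # average the instance-normalized top-2 layers into one target per time step
    y = Data2VecMultiModel.make_targets(fake_self, layer_outputs, num_layers=2)
    assert y.shape == (B, T, C)
    # mse loss (loss_beta == 0) scaled by 1 / sqrt(C) when loss_scale is None
    loss = Data2VecMultiModel.d2v_loss(fake_self, torch.randn(B, T, C), y)
    assert loss.shape == (B * T, C)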
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/data2vec2.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import re
from dataclasses import dataclass, field
from typing import Any, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from omegaconf import II, MISSING, open_dict
from fairseq import checkpoint_utils, tasks
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models import (
BaseFairseqModel,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import TransposeLast
from fairseq.tasks import FairseqTask
logger = logging.getLogger(__name__)
@dataclass
class AudioClassificationConfig(FairseqDataclass):
model_path: str = field(
default=MISSING, metadata={"help": "path to wav2vec 2.0 model"}
)
no_pretrained_weights: bool = field(
default=False, metadata={"help": "if true, does not load pretrained weights"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"}
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside wav2vec 2.0 model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside wav2vec 2.0 model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
        default=10, metadata={"help": "length of each masked span (in timesteps)"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask (normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
require_same_masks: bool = field(
default=True,
metadata={
"help": "whether to number of masked timesteps must be the same across all "
"examples in a batch"
},
)
mask_dropout: float = field(
default=0.0,
metadata={"help": "percent of masks to unmask for each sample"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
freeze_finetune_updates: int = field(
default=0, metadata={"help": "dont finetune wav2vec for this many updates"}
)
feature_grad_mult: float = field(
default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"}
)
layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"}
)
mask_channel_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
mask_channel_before: bool = False
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded wav2vec args
d2v_args: Any = None
offload_activations: bool = field(
default=False, metadata={"help": "offload_activations"}
)
min_params_to_wrap: int = field(
default=int(1e8),
metadata={
"help": "minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
},
)
checkpoint_activations: bool = field(
default=False,
metadata={"help": "recompute activations and save memory for extra compute"},
)
ddp_backend: str = II("distributed_training.ddp_backend")
prediction_mode: str = "lin_softmax"
eval_prediction_mode: Optional[str] = None
conv_kernel: int = -1
conv_stride: int = 1
two_convs: bool = False
extreme_factor: float = 1.0
conv_feature_layers: Optional[str] = field(
default=None,
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
mixup_prob: float = 1.0
source_mixup: float = -1
same_mixup: bool = True
label_mixup: bool = False
gain_mode: str = "none"
@register_model("audio_classification", dataclass=AudioClassificationConfig)
class AudioClassificationModel(BaseFairseqModel):
def __init__(self, cfg: AudioClassificationConfig, num_classes):
super().__init__()
self.apply_mask = cfg.apply_mask
self.cfg = cfg
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"require_same_masks": getattr(cfg, "require_same_masks", True),
"mask_dropout": getattr(cfg, "mask_dropout", 0),
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_before": cfg.mask_channel_before,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
"checkpoint_activations": cfg.checkpoint_activations,
"offload_activations": cfg.offload_activations,
"min_params_to_wrap": cfg.min_params_to_wrap,
"mixup": -1,
}
if cfg.conv_feature_layers is not None:
arg_overrides["conv_feature_layers"] = cfg.conv_feature_layers
if cfg.d2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(
cfg.model_path, arg_overrides
)
d2v_args = state.get("cfg", None)
if d2v_args is None:
d2v_args = convert_namespace_to_omegaconf(state["args"])
d2v_args.criterion = None
d2v_args.lr_scheduler = None
cfg.d2v_args = d2v_args
logger.info(d2v_args)
else:
state = None
d2v_args = cfg.d2v_args
model_normalized = d2v_args.task.get(
"normalize", d2v_args.model.get("normalize", False)
)
assert cfg.normalize == model_normalized, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for both pre-training and here"
)
if hasattr(cfg, "checkpoint_activations") and cfg.checkpoint_activations:
with open_dict(d2v_args):
d2v_args.model.checkpoint_activations = cfg.checkpoint_activations
d2v_args.task.data = cfg.data
task = tasks.setup_task(d2v_args.task)
model = task.build_model(d2v_args.model, from_checkpoint=True)
model.remove_pretraining_modules()
if state is not None and not cfg.no_pretrained_weights:
self.load_model_weights(state, model, cfg)
d = d2v_args.model.encoder_embed_dim
self.d2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
for p in self.parameters():
p.param_group = "pretrained"
if cfg.prediction_mode == "proj_avg_proj":
self.proj = nn.Linear(d, d * 2)
self.proj2 = nn.Linear(d * 2, num_classes)
for p in self.proj.parameters():
p.param_group = "projection"
for p in self.proj2.parameters():
p.param_group = "projection"
elif self.cfg.prediction_mode == "summary_proj":
self.proj = nn.Linear(d // 3, num_classes)
for p in self.proj.parameters():
p.param_group = "projection"
elif self.cfg.conv_kernel > 1 and not self.cfg.two_convs:
self.proj = nn.Sequential(
TransposeLast(),
nn.Conv1d(d, num_classes, kernel_size=self.cfg.conv_kernel, stride=self.cfg.conv_stride),
TransposeLast(),
)
for p in self.proj.parameters():
p.param_group = "projection"
elif self.cfg.conv_kernel > 0 and self.cfg.two_convs:
self.proj = nn.Sequential(
TransposeLast(),
nn.Conv1d(d, d, kernel_size=self.cfg.conv_kernel, stride=self.cfg.conv_stride),
TransposeLast(),
nn.GELU(),
nn.Linear(d, num_classes),
)
for p in self.proj.parameters():
p.param_group = "projection"
else:
self.proj = nn.Linear(d, num_classes)
for p in self.proj.parameters():
p.param_group = "projection"
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: AudioClassificationConfig, task: FairseqTask):
"""Build a new model instance."""
assert hasattr(task, "labels"), f"Task {task} must have an attribute 'labels'"
return cls(cfg, len(task.labels))
def load_model_weights(self, state, model, cfg):
if cfg.ddp_backend == "fully_sharded":
from fairseq.distributed import FullyShardedDataParallel
for name, module in model.named_modules():
if "encoder.layers" in name and len(name.split(".")) == 3:
                # Only for layers do we do special handling, loading the weights one by one.
                # We don't load all weights together as that wouldn't be memory efficient
                # and may cause OOM.
new_dict = {
k.replace(name + ".", ""): v
for (k, v) in state["model"].items()
if name + "." in k
}
assert isinstance(module, FullyShardedDataParallel)
with module.summon_full_params():
module.load_state_dict(new_dict, strict=True)
module._reset_lazy_init()
# Once layers are loaded, filter them out and load everything else.
            r = re.compile(r"encoder.layers.\d.")  # raw string avoids the invalid "\d" escape warning
filtered_list = list(filter(r.match, state["model"].keys()))
new_big_dict = {
k: v for (k, v) in state["model"].items() if k not in filtered_list
}
model.load_state_dict(new_big_dict, strict=False)
else:
if "_ema" in state["model"]:
del state["model"]["_ema"]
model.load_state_dict(state["model"], strict=False)
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def compute_gain(self, sound, fs=16_000, min_db=-80.0, mode="A_weighting"):
if fs == 16000:
n_fft = 2048
elif fs == 44100:
n_fft = 4096
else:
raise Exception("Invalid fs {}".format(fs))
stride = n_fft // 2
def a_weight(fs, n_fft, min_db=-80.0):
freq = np.linspace(0, fs // 2, n_fft // 2 + 1)
freq_sq = np.power(freq, 2)
freq_sq[0] = 1.0
weight = 2.0 + 20.0 * (
2 * np.log10(12194)
+ 2 * np.log10(freq_sq)
- np.log10(freq_sq + 12194 ** 2)
- np.log10(freq_sq + 20.6 ** 2)
- 0.5 * np.log10(freq_sq + 107.7 ** 2)
- 0.5 * np.log10(freq_sq + 737.9 ** 2)
)
weight = np.maximum(weight, min_db)
return weight
gain = []
for i in range(0, len(sound) - n_fft + 1, stride):
if mode == "RMSE":
g = np.mean(sound[i : i + n_fft] ** 2)
elif mode == "A_weighting":
spec = np.fft.rfft(np.hanning(n_fft + 1)[:-1] * sound[i : i + n_fft])
power_spec = np.abs(spec) ** 2
a_weighted_spec = power_spec * np.power(10, a_weight(fs, n_fft) / 10)
g = np.sum(a_weighted_spec)
else:
raise Exception("Invalid mode {}".format(mode))
gain.append(g)
gain = np.array(gain)
gain = np.maximum(gain, np.power(10, min_db / 10))
gain_db = 10 * np.log10(gain)
return gain_db
# adapted from https://github.com/mil-tokyo/bc_learning_sound/blob/master/utils.py
def compute_gain_torch(self, sound, fs=16_000, min_db=-80.0, mode="A_weighting"):
if fs == 16000:
n_fft = 2048
elif fs == 44100:
n_fft = 4096
else:
raise Exception("Invalid fs {}".format(fs))
if mode == "A_weighting":
if not hasattr(self, f"a_weight"):
self.a_weight = {}
if fs not in self.a_weight:
def a_weight(fs, n_fft, min_db=-80.0):
freq = np.linspace(0, fs // 2, n_fft // 2 + 1)
freq_sq = freq ** 2
freq_sq[0] = 1.0
weight = 2.0 + 20.0 * (
2 * np.log10(12194)
+ 2 * np.log10(freq_sq)
- np.log10(freq_sq + 12194 ** 2)
- np.log10(freq_sq + 20.6 ** 2)
- 0.5 * np.log10(freq_sq + 107.7 ** 2)
- 0.5 * np.log10(freq_sq + 737.9 ** 2)
)
weight = np.maximum(weight, min_db)
return weight
self.a_weight[fs] = torch.from_numpy(
np.power(10, a_weight(fs, n_fft, min_db) / 10)
).to(device=sound.device)
sound = sound.unfold(-1, n_fft, n_fft // 2)
if mode == "RMSE":
sound = sound ** 2
g = sound.mean(-1)
elif mode == "A_weighting":
w = torch.hann_window(n_fft, device=sound.device) * sound
spec = torch.fft.rfft(w)
power_spec = spec.abs() ** 2
a_weighted_spec = power_spec * self.a_weight[fs]
g = a_weighted_spec.sum(-1)
else:
raise Exception("Invalid mode {}".format(mode))
gain = torch.maximum(g, torch.tensor(10 ** (min_db / 10), device=g.device))
gain_db = 10 * torch.log10(gain)
return gain_db
def forward(self, source, padding_mask, label=None, **kwargs):
if self.cfg.source_mixup >= 0 and self.training and self.cfg.mixup_prob > 0:
with torch.no_grad():
mixed_source = source
mix_mask = None
if self.cfg.mixup_prob < 1:
mix_mask = (
torch.empty((source.size(0),), device=source.device)
.bernoulli_(self.cfg.mixup_prob)
.bool()
)
mixed_source = source[mix_mask]
r = (
torch.FloatTensor(
1 if self.cfg.same_mixup else mixed_source.size(0)
)
.uniform_(max(1e-6, self.cfg.source_mixup), 1)
.to(dtype=source.dtype, device=source.device)
)
mixup_perm = torch.randperm(source.size(0))
s2 = source[mixup_perm]
if self.cfg.gain_mode == "none":
p = r.unsqueeze(-1)
if mix_mask is not None:
s2 = s2[mix_mask]
else:
if self.cfg.gain_mode == "naive_rms":
G1 = source.pow(2).mean(dim=-1).sqrt()
else:
G1, _ = self.compute_gain_torch(
source, mode=self.cfg.gain_mode
).max(-1)
G1 = G1.to(dtype=source.dtype)
G2 = G1[mixup_perm]
if mix_mask is not None:
G1 = G1[mix_mask]
G2 = G2[mix_mask]
s2 = s2[mix_mask]
p = 1 / (1 + 10 ** ((G1 - G2) / 20) * (1 - r) / r)
p = p.unsqueeze(-1)
mixed = (p * mixed_source) + (1 - p) * s2
if mix_mask is None:
source = mixed / torch.sqrt(p ** 2 + (1 - p) ** 2)
else:
source[mix_mask] = mixed / torch.sqrt(p ** 2 + (1 - p) ** 2)
if label is not None and self.cfg.label_mixup:
r = r.unsqueeze(-1)
if mix_mask is None:
label = label * r + (1 - r) * label[mixup_perm]
else:
label[mix_mask] = (
label[mix_mask] * r + (1 - r) * label[mixup_perm][mix_mask]
)
d2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
res = self.d2v_model.extract_features(**d2v_args)
x = res["x"]
padding_mask = res["padding_mask"]
if padding_mask is not None:
x[padding_mask] = 0
x = self.final_dropout(x)
if self.training or (
self.cfg.eval_prediction_mode is None or self.cfg.eval_prediction_mode == ""
):
prediction_mode = self.cfg.prediction_mode
else:
prediction_mode = self.cfg.eval_prediction_mode
if prediction_mode == "average_before":
x = x.mean(dim=1)
if prediction_mode != "summary_mha" and prediction_mode != "summary_proj" and prediction_mode != "cls":
x = self.proj(x)
logits = True
if prediction_mode == "lin_softmax":
x = F.logsigmoid(x.float())
x = torch.logsumexp(x + x, dim=1) - torch.logsumexp(x, dim=1)
x = x.clamp(max=0)
x = x - torch.log(-(torch.expm1(x)))
elif prediction_mode == "extremized_odds":
x = x.float().sum(dim=1)
x = x * self.cfg.extreme_factor
elif prediction_mode == "average_before":
x = x.float()
elif prediction_mode == "average":
x = x.float().mean(dim=1)
elif prediction_mode == "average_sigmoid":
x = torch.sigmoid(x.float())
x = x.mean(dim=1)
logits = False
elif prediction_mode == "max":
x, _ = x.float().max(dim=1)
elif prediction_mode == "max_sigmoid":
x = torch.sigmoid(x.float())
x, _ = x.float().max(dim=1)
logits = False
elif prediction_mode == "proj_avg_proj":
x = x.mean(dim=1)
x = self.proj2(x)
elif prediction_mode == "summary_mha" or prediction_mode == "summary_proj":
x = self.d2v_model.summary(
x, padding_mask, proj=prediction_mode == "summary_proj"
)
x = x.type_as(source)
x = self.proj(x)
elif prediction_mode == "cls":
x = x[:,0]
x = self.proj(x)
else:
raise Exception(f"unknown prediction mode {prediction_mode}")
if label is None:
return torch.sigmoid(x) if logits else x
x = torch.nan_to_num(x)
if logits:
loss = F.binary_cross_entropy_with_logits(
x, label.float(), reduction="none"
)
else:
loss = F.binary_cross_entropy(x, label.float(), reduction="none")
result = {
"losses": {
"main": loss,
},
"sample_size": label.sum(),
}
if not self.training:
result["_predictions"] = torch.sigmoid(x) if logits else x
result["_targets"] = label
return result
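# Illustrative usage sketch (not part of the upstream fairseq module). It runs the
# A-weighted gain computation above on random clips and reproduces the loudness-aware
# mixing ratio used in forward(); all sizes below are hypothetical.
def _example_gain_mixup_ratio():
    from types import SimpleNamespace
    fake_self = SimpleNamespace()  # compute_gain_torch only caches self.a_weight on it
    sound = torch.randn(2, 16_000)  # two hypothetical 1-second clips at 16 kHz
    g = AudioClassificationModel.compute_gain_torch(fake_self, sound, mode="A_weighting")
    assert g.shape == (2, 14)  # one gain value (dB) per 2048-sample window, hop 1024
    # same mixing-ratio formula as in forward(): balances the two clips by loudness
    G1, G2, r = g[0].max(), g[1].max(), torch.tensor(0.5)
    p = 1 / (1 + 10 ** ((G1 - G2) / 20) * (1 - r) / r)
    assert 0 < p.item() < 1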
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/audio_classification.py |
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/__init__.py |
import math
import torch
def get_alibi(
max_positions: int,
attention_heads: int,
):
def get_slopes(n):
def get_slopes_power_of_2(n):
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio ** i for i in range(n)]
# In the paper, we only train models that have 2^a heads for some
# a. This function has some good properties that only occur when
# the input is a power of 2. To maintain that even when the number
# of heads is not a power of 2, we use this workaround.
if math.log2(n).is_integer():
return get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
return (
get_slopes_power_of_2(closest_power_of_2)
+ get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
)
maxpos = max_positions
attn_heads = attention_heads
slopes = torch.Tensor(get_slopes(attn_heads))
    # prepare the alibi linear position bias. Note that wav2vec2 is a
    # non-autoregressive model, so we want a symmetric bias with 0 on the
    # diagonal and otherwise linearly decreasing values
pos_bias = (
torch.abs(
torch.arange(maxpos).unsqueeze(0) - torch.arange(maxpos).unsqueeze(1)
)
* -1
)
alibi_bias = slopes.unsqueeze(1).unsqueeze(1) * pos_bias.unsqueeze(0).expand(
attn_heads, -1, -1
)
return alibi_bias
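# Illustrative usage sketch (not part of the upstream fairseq module). The bias is one
# (T x T) matrix per head: zero on the diagonal and linearly decreasing with distance,
# scaled by a per-head slope. The sizes below are hypothetical.
def _example_get_alibi():
    bias = get_alibi(max_positions=6, attention_heads=4)
    assert bias.shape == (4, 6, 6)
    assert torch.all(bias.diagonal(dim1=-2, dim2=-1) == 0)  # no penalty at distance 0
    assert torch.all(bias[:, 0, 1:] < 0)  # penalty grows with distance from the diagonal
    return bias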
def masked_alibi(alibi_bias, mask_indices, orig_B, orig_T):
alibi_bias = alibi_bias.view(orig_B, -1, orig_T, orig_T)
H = alibi_bias.size(1)
alibi_mask = mask_indices.unsqueeze(1)
alibi_bias = alibi_bias.masked_select(alibi_mask.unsqueeze(-1))
alibi_bias = alibi_bias.view(orig_B, H, -1, orig_T)
M = alibi_bias.size(-2)
alibi_bias = alibi_bias.masked_select(alibi_mask.unsqueeze(-2))
alibi_bias = alibi_bias.view(-1, M, M)
return alibi_bias
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import II
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from fairseq.modules import EMAModule, EMAModuleConfig
from fairseq.data.data_utils import compute_mask_indices
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec import (
ConvFeatureExtractionModel,
Wav2Vec2Config,
TransformerEncoder,
)
from fairseq.modules import (
GradMultiply,
LayerNorm,
)
from fairseq.utils import index_put
logger = logging.getLogger(__name__)
@dataclass
class Data2VecAudioConfig(Wav2Vec2Config):
loss_beta: float = field(
default=0, metadata={"help": "beta for smooth l1 loss. 0 means use l2 loss"}
)
loss_scale: Optional[float] = field(
default=None,
metadata={
"help": "scale the reconstruction loss by this constant. if None then scales by 1/sqrt(dim)"
},
)
average_top_k_layers: int = field(
default=8, metadata={"help": "how many layers to average"}
)
layer_norm_target_layer: bool = False
instance_norm_target_layer: bool = False
instance_norm_targets: bool = False
layer_norm_targets: bool = False
batch_norm_target_layer: bool = False
group_norm_target_layer: bool = False
ema_decay: float = field(default=0.999, metadata={"help": "initial ema decay rate"})
ema_end_decay: float = field(
default=0.9999, metadata={"help": "final ema decay rate"}
)
# when to finish annealing ema decay rate
ema_anneal_end_step: int = II("optimization.max_update")
ema_transformer_only: bool = field(
default=True,
metadata={"help": "whether to momentum update only the transformer"},
)
ema_layers_only: bool = field(
default=True,
metadata={"help": "whether to momentum update only the transformer layers"},
)
max_update: int = II("optimization.max_update")
min_target_var: float = field(
default=0.1, metadata={"help": "stop training if target var falls below this"}
)
min_pred_var: float = field(
default=0.01,
metadata={"help": "stop training if prediction var falls below this"},
)
def get_annealed_rate(start, end, curr_step, total_steps):
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining
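# Illustrative sketch (not part of the upstream fairseq module). The EMA decay is
# annealed linearly from `start` to `end` over `total_steps`, so halfway through it
# sits halfway between the two values. The numbers below are hypothetical, not defaults.
def _example_annealed_rate():
    assert abs(get_annealed_rate(0.999, 0.9999, 5_000, 10_000) - 0.99945) < 1e-9
    assert get_annealed_rate(0.999, 0.9999, 10_000, 10_000) == 0.9999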
@register_model("data2vec_audio", dataclass=Data2VecAudioConfig)
class Data2VecAudioModel(BaseFairseqModel):
def __init__(self, cfg: Data2VecAudioConfig):
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.extractor_embed = feature_enc_layers[-1][0]
self.ema = None
self.embed = cfg.encoder_embed_dim
self.average_top_k_layers = cfg.average_top_k_layers
self.loss_beta = cfg.loss_beta
self.loss_scale = cfg.loss_scale
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = nn.Linear(self.extractor_embed, cfg.encoder_embed_dim)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.extractor_embed)
self.final_proj = nn.Linear(self.embed, self.embed)
self.num_updates = 0
def make_ema_teacher(self):
ema_config = EMAModuleConfig(
ema_decay=self.cfg.ema_decay,
ema_fp32=True,
)
skip_keys = set()
if self.cfg.ema_layers_only:
self.cfg.ema_transformer_only = True
for k, _ in self.encoder.pos_conv.named_parameters():
skip_keys.add(f"pos_conv.{k}")
self.ema = EMAModule(
self.encoder if self.cfg.ema_transformer_only else self,
ema_config,
skip_keys=skip_keys,
)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
if self.ema is None and self.final_proj is not None:
logger.info(f"making ema teacher")
self.make_ema_teacher()
elif self.training and self.ema is not None:
if self.cfg.ema_decay != self.cfg.ema_end_decay:
if num_updates >= self.cfg.ema_anneal_end_step:
decay = self.cfg.ema_end_decay
else:
decay = get_annealed_rate(
self.cfg.ema_decay,
self.cfg.ema_end_decay,
num_updates,
self.cfg.ema_anneal_end_step,
)
self.ema.set_decay(decay)
if self.ema.get_decay() < 1:
self.ema.step(self.encoder if self.cfg.ema_transformer_only else self)
self.num_updates = num_updates
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = super().state_dict(destination, prefix, keep_vars)
if self.ema is not None:
state[prefix + "_ema"] = self.ema.fp32_params
return state
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
if self.ema is not None:
k = prefix + "_ema"
assert k in state_dict
self.ema.restore(state_dict[k], True)
del state_dict[k]
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
@classmethod
def build_model(cls, cfg: Data2VecAudioConfig, task=None):
"""Build a new model instance."""
return cls(cfg)
def apply_mask(
self,
x,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=1,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
require_same_masks=self.cfg.require_same_masks,
mask_dropout=self.cfg.mask_dropout,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb)
else:
mask_indices = None
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel_indices, 0)
return x, mask_indices
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(
input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
def forward(
self,
source,
padding_mask=None,
mask=True,
features_only=False,
layer=None,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
):
features = source
if self.feature_grad_mult > 0:
features = self.feature_extractor(features)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(features)
features = features.transpose(1, 2)
features = self.layer_norm(features)
orig_padding_mask = padding_mask
if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
            # these two operations make sure that all values
            # before the output length indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
else:
padding_mask = None
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
pre_encoder_features = None
if self.cfg.ema_transformer_only:
pre_encoder_features = features.clone()
features = self.dropout_input(features)
if mask:
x, mask_indices = self.apply_mask(
features,
padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
else:
x = features
mask_indices = None
x, layer_results = self.encoder(
x,
padding_mask=padding_mask,
layer=layer,
)
if features_only:
return {
"x": x,
"padding_mask": padding_mask,
"layer_results": layer_results,
}
result = {
"losses": {},
}
with torch.no_grad():
self.ema.model.eval()
if self.cfg.ema_transformer_only:
y, layer_results = self.ema.model.extract_features(
pre_encoder_features,
padding_mask=padding_mask,
min_layer=self.cfg.encoder_layers - self.average_top_k_layers,
)
y = {
"x": y,
"padding_mask": padding_mask,
"layer_results": layer_results,
}
else:
y = self.ema.model.extract_features(
source=source,
padding_mask=orig_padding_mask,
mask=False,
)
target_layer_results = [l[2] for l in y["layer_results"]]
permuted = False
if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer:
target_layer_results = [
tl.permute(1, 2, 0) for tl in target_layer_results # TBC -> BCT
]
permuted = True
if self.cfg.batch_norm_target_layer:
target_layer_results = [
F.batch_norm(
tl.float(), running_mean=None, running_var=None, training=True
)
for tl in target_layer_results
]
if self.cfg.instance_norm_target_layer:
target_layer_results = [
F.instance_norm(tl.float()) for tl in target_layer_results
]
if permuted:
target_layer_results = [
tl.transpose(1, 2) for tl in target_layer_results # BCT -> BTC
]
if self.cfg.group_norm_target_layer:
target_layer_results = [
F.layer_norm(tl.float(), tl.shape[-2:])
for tl in target_layer_results
]
if self.cfg.layer_norm_target_layer:
target_layer_results = [
F.layer_norm(tl.float(), tl.shape[-1:])
for tl in target_layer_results
]
y = sum(target_layer_results) / len(target_layer_results)
if self.cfg.layer_norm_targets:
y = F.layer_norm(y.float(), y.shape[-1:])
if self.cfg.instance_norm_targets:
y = F.instance_norm(y.float().transpose(1, 2)).transpose(1, 2)
if not permuted:
y = y.transpose(0, 1)
y = y[mask_indices]
x = x[mask_indices]
x = self.final_proj(x)
sz = x.size(-1)
if self.loss_beta == 0:
loss = F.mse_loss(x.float(), y.float(), reduction="none").sum(dim=-1)
else:
loss = F.smooth_l1_loss(
x.float(), y.float(), reduction="none", beta=self.loss_beta
).sum(dim=-1)
if self.loss_scale is not None:
scale = self.loss_scale
else:
scale = 1 / math.sqrt(sz)
result["losses"]["regression"] = loss.sum() * scale
if "sample_size" not in result:
result["sample_size"] = loss.numel()
with torch.no_grad():
result["target_var"] = self.compute_var(y)
result["pred_var"] = self.compute_var(x.float())
if self.num_updates > 5000 and result["target_var"] < self.cfg.min_target_var:
logger.error(
f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting"
)
raise Exception(
f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting"
)
if self.num_updates > 5000 and result["pred_var"] < self.cfg.min_pred_var:
logger.error(
f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting"
)
raise Exception(
f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting"
)
if self.ema is not None:
result["ema_decay"] = self.ema.get_decay() * 1000
return result
@staticmethod
def compute_var(y):
y = y.view(-1, y.size(-1))
if dist.is_initialized():
zc = torch.tensor(y.size(0)).cuda()
zs = y.sum(dim=0)
zss = (y ** 2).sum(dim=0)
dist.all_reduce(zc)
dist.all_reduce(zs)
dist.all_reduce(zss)
var = zss / (zc - 1) - (zs ** 2) / (zc * (zc - 1))
return torch.sqrt(var + 1e-6).mean()
else:
return torch.sqrt(y.var(dim=0) + 1e-6).mean()
def extract_features(
self, source, padding_mask, mask=False, layer=None
):
res = self.forward(
source,
padding_mask,
mask=mask,
features_only=True,
layer=layer,
)
return res
def remove_pretraining_modules(self, last_layer=None):
self.final_proj = None
self.ema = None
if last_layer is not None:
self.encoder.layers = nn.ModuleList(
l for i, l in enumerate(self.encoder.layers) if i <= last_layer
)
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/data2vec_audio.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# The code in this file is adapted from the BeiT implementation which can be found here:
# https://github.com/microsoft/unilm/tree/master/beit
import logging
from dataclasses import dataclass
from typing import Any
from omegaconf import II, MISSING
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, tasks
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
logger = logging.getLogger(__name__)
@dataclass
class Data2VecImageClassificationConfig(FairseqDataclass):
model_path: str = MISSING
no_pretrained_weights: bool = False
num_classes: int = 1000
mixup: float = 0.8
cutmix: float = 1.0
label_smoothing: float = 0.1
pretrained_model_args: Any = None
data: str = II("task.data")
@register_model(
"data2vec_image_classification", dataclass=Data2VecImageClassificationConfig
)
class Data2VecImageClassificationModel(BaseFairseqModel):
def __init__(self, cfg: Data2VecImageClassificationConfig):
super().__init__()
self.cfg = cfg
if cfg.pretrained_model_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.model_path, {})
pretrained_args = state.get("cfg", None)
pretrained_args.criterion = None
pretrained_args.lr_scheduler = None
cfg.pretrained_model_args = pretrained_args
logger.info(pretrained_args)
else:
state = None
pretrained_args = cfg.pretrained_model_args
pretrained_args.task.data = cfg.data
task = tasks.setup_task(pretrained_args.task)
model = task.build_model(pretrained_args.model, from_checkpoint=True)
model.remove_pretraining_modules()
self.model = model
if state is not None and not cfg.no_pretrained_weights:
self.load_model_weights(state, model, cfg)
self.fc_norm = nn.LayerNorm(pretrained_args.model.embed_dim)
self.head = nn.Linear(pretrained_args.model.embed_dim, cfg.num_classes)
self.head.weight.data.mul_(1e-3)
self.head.bias.data.mul_(1e-3)
self.mixup_fn = None
if cfg.mixup > 0 or cfg.cutmix > 0:
from timm.data import Mixup
self.mixup_fn = Mixup(
mixup_alpha=cfg.mixup,
cutmix_alpha=cfg.cutmix,
cutmix_minmax=None,
prob=1.0,
switch_prob=0.5,
mode="batch",
label_smoothing=cfg.label_smoothing,
num_classes=cfg.num_classes,
)
def load_model_weights(self, state, model, cfg):
if "_ema" in state["model"]:
del state["model"]["_ema"]
model.load_state_dict(state["model"], strict=True)
@classmethod
def build_model(cls, cfg: Data2VecImageClassificationConfig, task=None):
"""Build a new model instance."""
return cls(cfg)
def forward(
self,
img,
label=None,
):
if self.training and self.mixup_fn is not None and label is not None:
img, label = self.mixup_fn(img, label)
x = self.model(img, mask=False)
x = x[:, 1:]
x = self.fc_norm(x.mean(1))
x = self.head(x)
if label is None:
return x
if self.training and self.mixup_fn is not None:
loss = -label * F.log_softmax(x.float(), dim=-1)
else:
loss = F.cross_entropy(
x.float(),
label,
label_smoothing=self.cfg.label_smoothing if self.training else 0,
reduction="none",
)
result = {
"losses": {"regression": loss},
"sample_size": img.size(0),
}
if not self.training:
with torch.no_grad():
pred = x.argmax(-1)
correct = (pred == label).sum()
result["correct"] = correct
return result
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/data2vec_image_classification.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# The code in this file is adapted from the BeiT implementation which can be found here:
# https://github.com/microsoft/unilm/tree/master/beit
import logging
import math
import numpy as np
import random
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import II
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from fairseq.modules import EMAModule, EMAModuleConfig
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
logger = logging.getLogger(__name__)
@dataclass
class Data2VecVisionConfig(FairseqDataclass):
layer_scale_init_value: float = field(
default=1e-4, metadata={"help": "rescale layer outputs, 0 to disable"}
)
num_mask_patches: int = field(
default=75,
metadata={"help": "number of the visual tokens/patches need be masked"},
)
min_mask_patches_per_block: int = 16
max_mask_patches_per_block: int = 196
image_size: int = 224
patch_size: int = 16
in_channels: int = 3
shared_rel_pos_bias: bool = True
drop_path: float = 0.1
attention_dropout: float = 0.0
depth: int = 12
embed_dim: int = 768
num_heads: int = 12
mlp_ratio: int = 4
loss_beta: float = field(
default=0, metadata={"help": "beta for smooth l1 loss. 0 means use l2 loss"}
)
loss_scale: Optional[float] = field(
default=None,
metadata={
"help": "scale the reconstruction loss by this constant. if None then scales by 1/sqrt(dim)"
},
)
average_top_k_layers: int = field(
default=8, metadata={"help": "how many layers to average"}
)
end_of_block_targets: bool = True
layer_norm_target_layer: bool = False
instance_norm_target_layer: bool = False
batch_norm_target_layer: bool = False
instance_norm_targets: bool = False
layer_norm_targets: bool = False
ema_decay: float = field(default=0.999, metadata={"help": "initial ema decay rate"})
ema_end_decay: float = field(
default=0.9999, metadata={"help": "final ema decay rate"}
)
# when to finish annealing ema decay rate
ema_anneal_end_step: int = II("optimization.max_update")
ema_transformer_only: bool = field(
default=True,
metadata={"help": "whether to momentum update only the transformer layers"},
)
def get_annealed_rate(start, end, curr_step, total_steps):
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining
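# --- Illustrative sketch (not part of the original file) ---------------------
# get_annealed_rate interpolates the EMA decay linearly from `start` to `end`
# over `total_steps`.  The helper below is a hypothetical example added for
# illustration, assuming the default decay values in Data2VecVisionConfig
# (ema_decay=0.999, ema_end_decay=0.9999); it is never called by the model.
def _example_annealed_decay():
    # halfway through annealing the decay sits exactly between start and end
    assert abs(get_annealed_rate(0.999, 0.9999, 50_000, 100_000) - 0.99945) < 1e-6
    # once curr_step reaches total_steps the decay equals ema_end_decay
    assert abs(get_annealed_rate(0.999, 0.9999, 100_000, 100_000) - 0.9999) < 1e-6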
@register_model("data2vec_vision", dataclass=Data2VecVisionConfig)
class Data2VecVisionModel(BaseFairseqModel):
def __init__(self, cfg: Data2VecVisionConfig):
super().__init__()
self.cfg = cfg
self.ema = None
self.average_top_k_layers = cfg.average_top_k_layers
self.loss_beta = cfg.loss_beta
self.loss_scale = (
cfg.loss_scale
if cfg.loss_scale is not None
else 1 / math.sqrt(cfg.embed_dim)
)
self.patch_embed = PatchEmbed(
img_size=cfg.image_size,
patch_size=cfg.patch_size,
in_chans=cfg.in_channels,
embed_dim=cfg.embed_dim,
)
patch_size = self.patch_embed.patch_size
self.window_size = (
cfg.image_size // patch_size[0],
cfg.image_size // patch_size[1],
)
self.cls_emb = nn.Parameter(torch.FloatTensor(1, 1, cfg.embed_dim))
self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, cfg.embed_dim))
nn.init.trunc_normal_(self.cls_emb, 0.02)
nn.init.trunc_normal_(self.mask_emb, 0.02)
self.encoder = TransformerEncoder(cfg, self.patch_embed.patch_shape)
self.final_proj = nn.Linear(cfg.embed_dim, cfg.embed_dim)
self.num_updates = 0
def make_ema_teacher(self):
ema_config = EMAModuleConfig(
ema_decay=self.cfg.ema_decay,
ema_fp32=True,
)
self.ema = EMAModule(
self.encoder if self.cfg.ema_transformer_only else self,
ema_config,
)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
if self.ema is None and self.final_proj is not None:
logger.info(f"making ema teacher")
self.make_ema_teacher()
elif self.training and self.ema is not None:
if self.cfg.ema_decay != self.cfg.ema_end_decay:
if num_updates >= self.cfg.ema_anneal_end_step:
decay = self.cfg.ema_end_decay
else:
decay = get_annealed_rate(
self.cfg.ema_decay,
self.cfg.ema_end_decay,
num_updates,
self.cfg.ema_anneal_end_step,
)
self.ema.set_decay(decay)
if self.ema.get_decay() < 1:
self.ema.step(self.encoder if self.cfg.ema_transformer_only else self)
self.num_updates = num_updates
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = super().state_dict(destination, prefix, keep_vars)
if self.ema is not None:
state[prefix + "_ema"] = self.ema.fp32_params
return state
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
if self.ema is not None:
k = prefix + "_ema"
assert k in state_dict
self.ema.restore(state_dict[k], True)
del state_dict[k]
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
@classmethod
def build_model(cls, cfg: Data2VecVisionConfig, task=None):
"""Build a new model instance."""
return cls(cfg)
def make_mask(self, bsz, num_masks, min_masks, max_masks):
height, width = self.window_size
masks = np.zeros(shape=(bsz, height, width), dtype=int)
for i in range(bsz):
mask = masks[i]
mask_count = 0
min_aspect = 0.3
max_aspect = 1 / min_aspect
log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
def _mask(mask, max_mask_patches):
delta = 0
for attempt in range(10):
target_area = random.uniform(min_masks, max_mask_patches)
aspect_ratio = math.exp(random.uniform(*log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < width and h < height:
top = random.randint(0, height - h)
left = random.randint(0, width - w)
num_masked = mask[top : top + h, left : left + w].sum()
# Overlap
if 0 < h * w - num_masked <= max_mask_patches:
for i in range(top, top + h):
for j in range(left, left + w):
if mask[i, j] == 0:
mask[i, j] = 1
delta += 1
if delta > 0:
break
return delta
while mask_count < num_masks:
max_mask_patches = min(num_masks - mask_count, max_masks)
delta = _mask(mask, max_mask_patches)
if delta == 0:
break
else:
mask_count += delta
return torch.from_numpy(masks)
def forward(
self,
img,
mask: bool = True,
layer_results: bool = False,
):
x = self.patch_embed(img)
batch_size, seq_len, _ = x.size()
if mask:
mask_indices = self.make_mask(
img.size(0),
self.cfg.num_mask_patches,
self.cfg.min_mask_patches_per_block,
self.cfg.max_mask_patches_per_block,
)
bool_mask = mask_indices.view(mask_indices.size(0), -1).bool()
else:
mask_indices = bool_mask = None
cls_tokens = self.cls_emb.expand(batch_size, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if self.ema is not None:
with torch.no_grad():
self.ema.model.eval()
if self.cfg.ema_transformer_only:
y = self.ema.model(
x,
layer_results="end" if self.cfg.end_of_block_targets else "fc",
)
else:
y = self.ema.model(
img,
mask=False,
layer_results=True,
)
y = y[-self.cfg.average_top_k_layers :]
permuted = False
if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer:
y = [tl.transpose(1, 2) for tl in y] # BTC -> BCT
permuted = True
if self.cfg.batch_norm_target_layer:
y = [
F.batch_norm(
tl.float(), running_mean=None, running_var=None, training=True
)
for tl in y
]
if self.cfg.instance_norm_target_layer:
y = [F.instance_norm(tl.float()) for tl in y]
if permuted:
y = [tl.transpose(1, 2) for tl in y] # BCT -> BTC
if self.cfg.layer_norm_target_layer:
y = [F.layer_norm(tl.float(), tl.shape[-1:]) for tl in y]
y = sum(y) / len(y)
if self.cfg.layer_norm_targets:
y = F.layer_norm(y.float(), y.shape[-1:])
if self.cfg.instance_norm_targets:
y = F.instance_norm(y.float().transpose(1, 2)).transpose(1, 2)
y = y[bool_mask].float()
if mask_indices is not None:
mask_token = self.mask_emb.expand(batch_size, seq_len, -1)
w = mask_indices.view(mask_indices.size(0), -1, 1).type_as(mask_token)
x[:, 1:] = x[:, 1:] * (1 - w) + mask_token * w
if layer_results:
enc_layer_results = "end" if self.cfg.end_of_block_targets else "fc"
else:
enc_layer_results = None
x = self.encoder(x, layer_results=enc_layer_results)
if layer_results or mask_indices is None:
return x
x = x[bool_mask].float()
if self.loss_beta == 0:
loss = F.mse_loss(x, y, reduction="none").sum(dim=-1)
else:
loss = F.smooth_l1_loss(x, y, reduction="none", beta=self.loss_beta).sum(
dim=-1
)
if self.loss_scale > 0:
loss = loss * self.loss_scale
result = {
"losses": {"regression": loss.sum()},
"sample_size": loss.numel(),
"target_var": self.compute_var(y),
"pred_var": self.compute_var(x),
"ema_decay": self.ema.get_decay() * 1000,
}
return result
@staticmethod
def compute_var(y):
y = y.view(-1, y.size(-1))
if dist.is_initialized():
zc = torch.tensor(y.size(0)).cuda()
zs = y.sum(dim=0)
zss = (y ** 2).sum(dim=0)
dist.all_reduce(zc)
dist.all_reduce(zs)
dist.all_reduce(zss)
var = zss / (zc - 1) - (zs ** 2) / (zc * (zc - 1))
return torch.sqrt(var + 1e-6).mean()
else:
return torch.sqrt(y.var(dim=0) + 1e-6).mean()
def remove_pretraining_modules(self, last_layer=None):
self.final_proj = None
self.ema = None
self.encoder.norm = nn.Identity()
self.mask_emb = None
if last_layer is not None:
self.encoder.layers = nn.ModuleList(
l for i, l in enumerate(self.encoder.layers) if i <= last_layer
)
class PatchEmbed(nn.Module):
"""Image to Patch Embedding"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
if isinstance(img_size, int):
img_size = img_size, img_size
if isinstance(patch_size, int):
patch_size = patch_size, patch_size
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.conv = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
def forward(self, x):
# BCHW -> BTC
x = self.conv(x).flatten(2).transpose(1, 2)
return x
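# --- Illustrative sketch (not part of the original file) ---------------------
# With the defaults above (224x224 images, 16x16 patches, embed_dim=768) the
# strided conv turns an image batch into 14*14 = 196 patch tokens.  The helper
# below is a hypothetical shape check added for illustration only.
def _example_patch_embed_shapes():
    pe = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    imgs = torch.randn(2, 3, 224, 224)   # B, C, H, W
    tokens = pe(imgs)                    # B, T, C with T = (224 // 16) ** 2
    assert tokens.shape == (2, 196, 768)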
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
attn_drop=0.0,
proj_drop=0.0,
window_size=None,
attn_head_dim=None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (
2 * window_size[1] - 1
) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = (
coords_flatten[:, :, None] - coords_flatten[:, None, :]
) # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(
1, 2, 0
).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(
size=(window_size[0] * window_size[1] + 1,) * 2,
dtype=relative_coords.dtype,
)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat(
(
self.q_bias,
torch.zeros_like(self.v_bias, requires_grad=False),
self.v_bias,
)
)
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if self.relative_position_bias_table is not None:
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1,
-1,
) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1
).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
print("attn.size() :", attn.size())
print("rel_pos_bias.size() :", rel_pos_bias.size())
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (
2 * window_size[1] - 1
) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)
)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = (
coords_flatten[:, :, None] - coords_flatten[:, None, :]
) # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(
1, 2, 0
).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(
size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
def forward(self):
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1,
-1,
) # Wh*Ww,Wh*Ww,nH
print("self.window_size :", self.window_size)
print("self.num_relative_distance :", self.num_relative_distance)
print("self.relative_position_index :", self.relative_position_index.size(), self.relative_position_index)
print("relative_position_bias.size(), relative_position_bias :",relative_position_bias.size(), relative_position_bias)
print("self.relative_position_bias_table.size(), self.relative_position_bias_table :",self.relative_position_bias_table.size(), self.relative_position_bias_table)
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
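# --- Illustrative sketch (not part of the original file) ---------------------
# For a window of Wh x Ww patches there are (2*Wh-1)*(2*Ww-1) distinct relative
# offsets between patch pairs, plus 3 extra entries for cls->token, token->cls
# and cls->cls, which is exactly num_relative_distance.  The hypothetical
# helper below checks this on a tiny 2x2 window and is not called anywhere.
def _example_relative_position_bias_shapes():
    rpb = RelativePositionBias(window_size=(2, 2), num_heads=4)
    assert rpb.num_relative_distance == (2 * 2 - 1) * (2 * 2 - 1) + 3  # == 12
    bias = rpb()                          # nH x (Wh*Ww + 1) x (Wh*Ww + 1)
    assert bias.shape == (4, 5, 5)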
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
if self.drop_prob is None or self.drop_prob == 0.0 or not self.training:
return x
keep_prob = 1 - self.drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
def extra_repr(self) -> str:
return "p={}".format(self.drop_prob)
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
init_values=None,
window_size=None,
):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
attn_drop=attn_drop,
proj_drop=drop,
window_size=window_size,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = nn.Sequential(
nn.Linear(dim, mlp_hidden_dim),
nn.GELU(),
nn.Linear(mlp_hidden_dim, dim),
nn.Dropout(drop),
)
if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(
init_values * torch.ones((dim)), requires_grad=True
)
self.gamma_2 = nn.Parameter(
init_values * torch.ones((dim)), requires_grad=True
)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
print("inside block :", x.size())
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
fc_feature = self.drop_path(self.mlp(self.norm2(x)))
x = x + fc_feature
else:
x = x + self.drop_path(
self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)
)
fc_feature = self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
x = x + fc_feature
return x, fc_feature
class TransformerEncoder(nn.Module):
def __init__(self, cfg: Data2VecVisionConfig, patch_shape):
super().__init__()
self.rel_pos_bias = None
if cfg.shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(
window_size=patch_shape, num_heads=cfg.num_heads
)
dpr = [
x.item() for x in torch.linspace(0, cfg.drop_path, cfg.depth)
] # stochastic depth decay rule
print("TransformerEncoder > patch_shape :", patch_shape)
self.blocks = nn.ModuleList(
Block(
dim=cfg.embed_dim,
num_heads=cfg.num_heads,
attn_drop=cfg.attention_dropout,
drop_path=dpr[i],
init_values=cfg.layer_scale_init_value,
window_size=patch_shape if not cfg.shared_rel_pos_bias else None,
)
for i in range(cfg.depth)
)
self.norm = nn.LayerNorm(cfg.embed_dim)
self.apply(self.init_weights)
self.fix_init_weight()
def init_weights(self, m):
std = 0.02
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
nn.init.trunc_normal_(m.weight, std=std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp[2].weight.data, layer_id + 1)
def extract_features(self, x, layer_results):
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
z = []
for i, blk in enumerate(self.blocks):
x, fc_feature = blk(x, rel_pos_bias=rel_pos_bias)
if layer_results == "end":
z.append(x)
elif layer_results == "fc":
z.append(fc_feature)
return z if layer_results else self.norm(x)
def forward(self, x, layer_results=None):
x = self.extract_features(x, layer_results=layer_results)
if layer_results:
return [z[:, 1:] for z in x]
x = x[:, 1:]
return x
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/data2vec_vision.py |
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/modalities/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
from functools import partial
from typing import Callable, Dict, Optional
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from fairseq.modules import PositionalEmbedding, FairseqDropout, LayerNorm
from fairseq.tasks import FairseqTask
from .base import D2vModalityConfig, ModalitySpecificEncoder, get_alibi_bias
from .modules import BlockEncoder, Decoder1d
from examples.data2vec.data.modality import Modality
@dataclass
class D2vTextConfig(D2vModalityConfig):
type: Modality = Modality.TEXT
max_source_positions: int = 512
learned_pos: bool = True
dropout: float = 0.1 # used for both local_encoder and contextualized encoder. tied with global transformer in data2vec_text
no_scale_embedding: bool = True
layernorm_embedding: bool = True
no_token_positional_embeddings: bool = False
class TextEncoder(ModalitySpecificEncoder):
modality_cfg: D2vTextConfig
def __init__(
self,
modality_cfg: D2vTextConfig,
embed_dim: int,
make_block: Callable[[float], nn.ModuleList],
norm_layer: Callable[[int], nn.LayerNorm],
layer_norm_first: bool,
alibi_biases: Dict,
task: Optional[FairseqTask],
):
self.pad_idx = task.source_dictionary.pad()
self.vocab_size = len(task.source_dictionary)
local_encoder = TextLocalEncoder(
vocab_size=self.vocab_size,
embed_dim=embed_dim,
max_source_positions=modality_cfg.max_source_positions,
pad_idx=self.pad_idx,
no_scale_embedding=modality_cfg.no_scale_embedding,
layernorm_embedding=modality_cfg.layernorm_embedding,
dropout=modality_cfg.dropout,
no_token_positional_embeddings=modality_cfg.no_token_positional_embeddings,
learned_pos=modality_cfg.learned_pos,
)
dpr = np.linspace(
modality_cfg.start_drop_path_rate,
modality_cfg.end_drop_path_rate,
modality_cfg.prenet_depth,
)
context_encoder = BlockEncoder(
nn.ModuleList(make_block(dpr[i]) for i in range(modality_cfg.prenet_depth)),
norm_layer(embed_dim)
if not layer_norm_first and modality_cfg.prenet_depth > 0
else None,
layer_norm_first,
modality_cfg.prenet_layerdrop,
modality_cfg.prenet_dropout if modality_cfg.prenet_depth > 0 else 0.0,
)
decoder = (
Decoder1d(modality_cfg.decoder, embed_dim)
if modality_cfg.decoder is not None
else None
)
alibi_bias_fn = partial(get_alibi_bias, alibi_biases=alibi_biases)
super().__init__(
modality_cfg=modality_cfg,
embed_dim=embed_dim,
local_encoder=local_encoder,
project_features=nn.Identity(),
fixed_positional_encoder=None,
relative_positional_encoder=None,
context_encoder=context_encoder,
decoder=decoder,
get_alibi_bias=alibi_bias_fn,
)
def reset_parameters(self):
super().reset_parameters()
def convert_padding_mask(self, x, padding_mask):
if padding_mask is None or padding_mask.size(1) == x.size(1):
return padding_mask
diff = self.downsample - padding_mask.size(1) % self.downsample
if 0 < diff < self.downsample:
padding_mask = F.pad(padding_mask, (0, diff), value=True)
padding_mask = padding_mask.view(padding_mask.size(0), -1, self.downsample)
padding_mask = padding_mask.all(-1)
if padding_mask.size(1) > x.size(1):
padding_mask = padding_mask[:, : x.size(1)]
assert x.size(1) == padding_mask.size(
1
), f"{x.size(1), padding_mask.size(1), diff, self.downsample}"
return padding_mask
class TextLocalEncoder(nn.Module):
def __init__(
self,
vocab_size,
embed_dim,
max_source_positions,
pad_idx,
no_scale_embedding,
layernorm_embedding,
dropout,
no_token_positional_embeddings,
learned_pos,
):
super().__init__()
self.pad_idx = pad_idx
self.dropout_module = FairseqDropout(dropout)
self.embed_tokens = nn.Embedding(vocab_size, embed_dim, pad_idx)
self.embed_scale = 1.0 if no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
max_source_positions,
embed_dim,
pad_idx,
learned=learned_pos,
)
if not no_token_positional_embeddings
else None
)
self.embed_scale = 1.0 if no_scale_embedding else math.sqrt(embed_dim)
self.layernorm_embedding = None
if layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim)
def forward(self, src_tokens):
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x = x + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x
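# --- Illustrative sketch (not part of the original file) ---------------------
# TextLocalEncoder maps token ids to (B, T, embed_dim) features: scaled token
# embeddings plus (optionally learned) positional embeddings, followed by an
# optional LayerNorm and dropout.  The helper below is a hypothetical example
# with a toy vocabulary (pad_idx=1 as in fairseq dictionaries); it is not used
# by the model code.
def _example_text_local_encoder_shapes():
    import torch
    enc = TextLocalEncoder(
        vocab_size=100,
        embed_dim=32,
        max_source_positions=512,
        pad_idx=1,
        no_scale_embedding=True,
        layernorm_embedding=True,
        dropout=0.1,
        no_token_positional_embeddings=False,
        learned_pos=True,
    )
    src_tokens = torch.randint(2, 100, (2, 16))   # B x T token ids, no padding
    out = enc(src_tokens)                         # B x T x embed_dim
    assert out.shape == (2, 16, 32)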
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/modalities/text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from functools import partial
from dataclasses import dataclass
from typing import Callable, Dict, Optional
from timm.models.layers import to_2tuple
from fairseq.tasks import FairseqTask
from examples.data2vec.models.mae import get_2d_sincos_pos_embed, PatchEmbed
from .base import (
D2vModalityConfig,
ModalitySpecificEncoder,
get_alibi_bias,
MaskSeed,
)
from .modules import (
BlockEncoder,
Decoder2d,
FixedPositionalEncoder,
TransformerDecoder,
EncDecTransformerDecoder,
)
from examples.data2vec.data.modality import Modality
@dataclass
class D2vImageConfig(D2vModalityConfig):
type: Modality = Modality.IMAGE
input_size: int = 224
in_chans: int = 3
patch_size: int = 16
embed_dim: int = 768
alibi_dims: int = 2
alibi_distance: str = "manhattan"
fixed_positions: bool = True
transformer_decoder: bool = False
enc_dec_transformer: bool = False
class ImageEncoder(ModalitySpecificEncoder):
modality_cfg: D2vImageConfig
def __init__(
self,
modality_cfg: D2vImageConfig,
embed_dim: int,
make_block: Callable[[float, Optional[int], Optional[int]], nn.ModuleList],
norm_layer: Callable[[int], nn.LayerNorm],
layer_norm_first: bool,
alibi_biases: Dict,
task: Optional[FairseqTask],
):
img_size = to_2tuple(modality_cfg.input_size)
patch_size = to_2tuple(modality_cfg.patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
local_encoder = PatchEmbed(
modality_cfg.input_size,
modality_cfg.patch_size,
modality_cfg.in_chans,
modality_cfg.embed_dim,
)
w = local_encoder.proj.weight.data
torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
if modality_cfg.embed_dim != embed_dim:
local_encoder = nn.Sequential(
local_encoder,
nn.Linear(modality_cfg.embed_dim, embed_dim),
)
project_features = nn.Identity()
pos_embed = nn.Parameter(
torch.zeros(1, num_patches, embed_dim), requires_grad=False
)
side_n = int(num_patches ** 0.5)
emb = get_2d_sincos_pos_embed(
pos_embed.shape[-1],
side_n,
cls_token=False,
)
pos_embed.data.copy_(torch.from_numpy(emb).float().unsqueeze(0))
fixed_positional_encoder = (
FixedPositionalEncoder(pos_embed) if modality_cfg.fixed_positions else None
)
dpr = np.linspace(
modality_cfg.start_drop_path_rate,
modality_cfg.end_drop_path_rate,
modality_cfg.prenet_depth,
)
context_encoder = BlockEncoder(
nn.ModuleList(make_block(dpr[i]) for i in range(modality_cfg.prenet_depth)),
norm_layer(embed_dim) if not layer_norm_first else None,
layer_norm_first,
modality_cfg.prenet_layerdrop,
modality_cfg.prenet_dropout,
)
if modality_cfg.transformer_decoder:
if modality_cfg.enc_dec_transformer:
decoder = EncDecTransformerDecoder(modality_cfg.decoder, embed_dim)
else:
dec_enc = BlockEncoder(
nn.ModuleList(
make_block(0, modality_cfg.decoder.decoder_dim, 8)
for _ in range(modality_cfg.decoder.decoder_layers)
),
None,
layer_norm_first,
0,
0,
)
decoder = TransformerDecoder(modality_cfg.decoder, embed_dim, dec_enc)
else:
decoder = (
Decoder2d(modality_cfg.decoder, embed_dim, side_n, side_n)
if modality_cfg.decoder is not None
else None
)
alibi_bias_fn = partial(
get_alibi_bias,
alibi_biases=alibi_biases,
heads=modality_cfg.num_alibi_heads,
dims=modality_cfg.alibi_dims,
distance=modality_cfg.alibi_distance,
)
super().__init__(
modality_cfg=modality_cfg,
embed_dim=embed_dim,
local_encoder=local_encoder,
project_features=project_features,
fixed_positional_encoder=fixed_positional_encoder,
relative_positional_encoder=None,
context_encoder=context_encoder,
decoder=decoder,
get_alibi_bias=alibi_bias_fn,
)
def reset_parameters(self):
super().reset_parameters()
if self.decoder is not None:
self.decoder.reset_parameters()
@torch.no_grad()
def patchify(self, imgs):
"""
imgs: (N, 3, H, W)
x: (N, L, patch_size**2 *3)
"""
p = self.modality_cfg.patch_size
h = w = imgs.shape[2] // p
x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
x = torch.einsum("nchpwq->nhwpqc", x)
x = x.reshape(shape=(imgs.shape[0], h * w, p ** 2 * 3))
return x
@torch.no_grad()
def unpatchify(self, x):
"""
x: (N, L, patch_size**2 *3)
imgs: (N, 3, H, W)
"""
p = self.modality_cfg.patch_size
h = w = int(x.shape[1] ** 0.5)
assert h * w == x.shape[1]
x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
x = torch.einsum("nhwpqc->nchpwq", x)
imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
return imgs
def compute_mask(
self,
x,
padding_mask,
mask_seed: Optional[MaskSeed],
apply,
shape=None,
precomputed_mask=None,
):
mlen = self.modality_cfg.mask_length
if mlen <= 1:
return super().compute_mask(
x, padding_mask, mask_seed, apply, precomputed_mask
)
if precomputed_mask is not None:
mask = precomputed_mask
else:
from fairseq.data.data_utils import compute_block_mask_2d
if shape is not None:
B, L, D = shape
else:
B, L, D = x.shape
mask = compute_block_mask_2d(
shape=(B, L),
mask_prob=self.modality_cfg.mask_prob,
mask_length=self.modality_cfg.mask_length,
mask_prob_adjust=self.modality_cfg.mask_prob_adjust,
inverse_mask=self.modality_cfg.inverse_mask,
require_same_masks=True,
mask_dropout=self.modality_cfg.mask_dropout,
)
mask_info = self.make_maskinfo(x, mask, shape)
if apply:
x = self.apply_mask(x, mask_info)
return x, mask_info
def decoder_input(self, x, mask_info):
if (
not self.modality_cfg.transformer_decoder
or not self.modality_cfg.enc_dec_transformer
):
return super().decoder_input(x, mask_info)
inp_drop = self.modality_cfg.decoder.input_dropout
if inp_drop > 0:
x = F.dropout(x, inp_drop, training=self.training, inplace=True)
kv = x[:, self.modality_cfg.num_extra_tokens :]
assert self.fixed_positional_encoder is not None
pos = self.fixed_positional_encoder(x, None).expand(x.size(0), -1, -1)
mask = mask_info.mask.bool()
if self.modality_cfg.decoder.add_positions_all:
kv = kv + pos[~mask].view(kv.shape)
q = pos[mask].view(x.size(0), -1, x.size(-1))
return q, kv
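# --- Illustrative sketch (not part of the original file) ---------------------
# patchify/unpatchify above are exact inverses: an image batch (N, 3, H, W)
# with H = W = h * p becomes (N, h*w, p*p*3) and back.  The hypothetical helper
# below reproduces the same einsum layout standalone (p=16, 224x224 input) and
# is not called by the encoder.
def _example_patchify_round_trip():
    import torch
    p, n, h = 16, 2, 224 // 16
    imgs = torch.randn(n, 3, h * p, h * p)
    x = imgs.reshape(n, 3, h, p, h, p)
    x = torch.einsum("nchpwq->nhwpqc", x).reshape(n, h * h, p * p * 3)   # patchify
    y = x.reshape(n, h, h, p, p, 3)
    y = torch.einsum("nhwpqc->nchpwq", y).reshape(n, 3, h * p, h * p)    # unpatchify
    assert torch.equal(imgs, y)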
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/modalities/images.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import torch
import torch.nn as nn
import numpy as np
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
from fairseq.models.wav2vec import ConvFeatureExtractionModel
from fairseq.modules import (
LayerNorm,
SamePad,
TransposeLast,
)
from fairseq.tasks import FairseqTask
from .base import D2vModalityConfig, ModalitySpecificEncoder, get_alibi_bias
from .modules import BlockEncoder, Decoder1d
from examples.data2vec.data.modality import Modality
@dataclass
class D2vAudioConfig(D2vModalityConfig):
type: Modality = Modality.AUDIO
extractor_mode: str = "layer_norm"
feature_encoder_spec: str = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_pos_width: int = field(
default=95,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
conv_pos_depth: int = field(
default=5,
metadata={"help": "depth of positional encoder network"},
)
conv_pos_pre_ln: bool = False
class AudioEncoder(ModalitySpecificEncoder):
modality_cfg: D2vAudioConfig
def __init__(
self,
modality_cfg: D2vAudioConfig,
embed_dim: int,
make_block: Callable[[float], nn.ModuleList],
norm_layer: Callable[[int], nn.LayerNorm],
layer_norm_first: bool,
alibi_biases: Dict,
task: Optional[FairseqTask],
):
self.feature_enc_layers = eval(modality_cfg.feature_encoder_spec)
feature_embed_dim = self.feature_enc_layers[-1][0]
local_encoder = ConvFeatureExtractionModel(
conv_layers=self.feature_enc_layers,
dropout=0.0,
mode=modality_cfg.extractor_mode,
conv_bias=False,
)
project_features = nn.Sequential(
TransposeLast(),
nn.LayerNorm(feature_embed_dim),
nn.Linear(feature_embed_dim, embed_dim),
)
num_pos_layers = modality_cfg.conv_pos_depth
k = max(3, modality_cfg.conv_pos_width // num_pos_layers)
positional_encoder = nn.Sequential(
TransposeLast(),
*[
nn.Sequential(
nn.Conv1d(
embed_dim,
embed_dim,
kernel_size=k,
padding=k // 2,
groups=modality_cfg.conv_pos_groups,
),
SamePad(k),
TransposeLast(),
LayerNorm(embed_dim, elementwise_affine=False),
TransposeLast(),
nn.GELU(),
)
for _ in range(num_pos_layers)
],
TransposeLast(),
)
if modality_cfg.conv_pos_pre_ln:
positional_encoder = nn.Sequential(LayerNorm(embed_dim), positional_encoder)
dpr = np.linspace(
modality_cfg.start_drop_path_rate,
modality_cfg.end_drop_path_rate,
modality_cfg.prenet_depth,
)
context_encoder = BlockEncoder(
nn.ModuleList(make_block(dpr[i]) for i in range(modality_cfg.prenet_depth)),
norm_layer(embed_dim) if not layer_norm_first else None,
layer_norm_first,
modality_cfg.prenet_layerdrop,
modality_cfg.prenet_dropout,
)
decoder = (
Decoder1d(modality_cfg.decoder, embed_dim)
if modality_cfg.decoder is not None
else None
)
alibi_bias_fn = partial(get_alibi_bias, alibi_biases=alibi_biases)
super().__init__(
modality_cfg=modality_cfg,
embed_dim=embed_dim,
local_encoder=local_encoder,
project_features=project_features,
fixed_positional_encoder=None,
relative_positional_encoder=positional_encoder,
context_encoder=context_encoder,
decoder=decoder,
get_alibi_bias=alibi_bias_fn,
)
def convert_padding_mask(self, x, padding_mask):
def get_feat_extract_output_lengths(input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
for i in range(len(self.feature_enc_layers)):
input_lengths = _conv_out_length(
input_lengths,
self.feature_enc_layers[i][1],
self.feature_enc_layers[i][2],
)
return input_lengths.to(torch.long)
if padding_mask is not None:
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = get_feat_extract_output_lengths(input_lengths)
if padding_mask.any():
padding_mask = torch.zeros(x.shape[:2], dtype=x.dtype, device=x.device)
# these two operations make sure that all values
# before the output lengths indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (
1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])
).bool()
else:
padding_mask = torch.zeros(
x.shape[:2], dtype=torch.bool, device=x.device
)
return padding_mask
def reset_parameters(self):
super().reset_parameters()
for mod in self.project_features.children():
if isinstance(mod, nn.Linear):
mod.reset_parameters()
if self.decoder is not None:
self.decoder.reset_parameters()
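# --- Illustrative sketch (not part of the original file) ---------------------
# The default feature_encoder_spec downsamples raw 16 kHz audio by a total
# stride of 5*2*2*2*2*2*2 = 320 samples (20 ms) per frame.  The hypothetical
# helper below applies the same per-layer length formula used in
# convert_padding_mask, floor((L - kernel) / stride + 1), to one second of
# audio; it is not called by the encoder.
def _example_conv_output_length():
    spec = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] + [(512, 2, 2)]
    length = 16000                        # one second of 16 kHz audio
    for _, kernel, stride in spec:
        length = (length - kernel) // stride + 1
    assert length == 49                   # roughly 50 frames per second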
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/modalities/audio.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from dataclasses import dataclass
from fairseq.modules import (
LayerNorm,
SamePad,
SamePad2d,
TransposeLast,
)
@dataclass
class D2vDecoderConfig:
decoder_dim: int = 384
decoder_groups: int = 16
decoder_kernel: int = 5
decoder_layers: int = 5
input_dropout: float = 0.1
add_positions_masked: bool = False
add_positions_all: bool = False
decoder_residual: bool = True
projection_layers: int = 1
projection_ratio: float = 2.0
class FixedPositionalEncoder(nn.Module):
def __init__(self, pos_embed):
super().__init__()
self.positions = pos_embed
def forward(self, x, padding_mask):
return self.positions
class TextFeatPositionalEncoder(nn.Module):
"""
Original encoder expects (B, T) long input. This module wraps it to take
local_encoder outputs, which are (B, T, D) float tensors
"""
def __init__(self, pos_encoder):
super().__init__()
self.pos_encoder = pos_encoder
def forward(self, x, padding_mask):
# assume padded token embeddings are 0s
# TODO: consider using padding_mask as input
return self.pos_encoder(x[..., 0])
class BlockEncoder(nn.Module):
def __init__(self, blocks, norm_layer, layer_norm_first, layerdrop, dropout):
super().__init__()
self.blocks = blocks
self.norm = norm_layer
self.layer_norm_first = layer_norm_first
self.layerdrop = layerdrop
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, x, padding_mask, alibi_bias, alibi_scale):
if self.norm is not None and not self.layer_norm_first:
x = self.norm(x)
x = self.dropout(x)
for i, blk in enumerate(self.blocks):
if (
not self.training
or self.layerdrop == 0
or (np.random.random() > self.layerdrop)
):
ab = alibi_bias
if ab is not None and alibi_scale is not None:
scale = (
alibi_scale[i]
if alibi_scale.size(0) > 1
else alibi_scale.squeeze(0)
)
ab = ab * scale.type_as(ab)
x, _ = blk(x, padding_mask, ab)
if self.norm is not None and self.layer_norm_first:
x = self.norm(x)
return x
class DecoderBase(nn.Module):
decoder_cfg: D2vDecoderConfig
def __init__(self, cfg: D2vDecoderConfig):
super().__init__()
self.decoder_cfg = cfg
def reset_parameters(self):
for mod in self.proj.modules():
if isinstance(mod, nn.Linear):
mod.reset_parameters()
def add_residual(self, x, residual, i, mask_info):
if (
residual is None
or not self.decoder_cfg.decoder_residual
or residual.size(1) != x.size(1)
):
return x
ret = x + residual
return ret
class Decoder1d(DecoderBase):
def __init__(self, cfg: D2vDecoderConfig, input_dim):
super().__init__(cfg)
def make_block(in_dim):
block = [
nn.Conv1d(
in_dim,
cfg.decoder_dim,
kernel_size=cfg.decoder_kernel,
padding=cfg.decoder_kernel // 2,
groups=cfg.decoder_groups,
),
SamePad(cfg.decoder_kernel),
TransposeLast(),
LayerNorm(cfg.decoder_dim, elementwise_affine=False),
TransposeLast(),
nn.GELU(),
]
return nn.Sequential(*block)
self.blocks = nn.Sequential(
*[
make_block(input_dim if i == 0 else cfg.decoder_dim)
for i in range(cfg.decoder_layers)
]
)
projs = []
curr_dim = cfg.decoder_dim
for i in range(cfg.projection_layers - 1):
next_dim = int(curr_dim * cfg.projection_ratio) if i == 0 else curr_dim
projs.append(nn.Linear(curr_dim, next_dim))
projs.append(nn.GELU())
curr_dim = next_dim
projs.append(nn.Linear(curr_dim, input_dim))
if len(projs) == 1:
self.proj = projs[0]
else:
self.proj = nn.Sequential(*projs)
def forward(self, x, mask_info):
x = x.transpose(1, 2)
residual = x
for i, layer in enumerate(self.blocks):
x = layer(x)
x = self.add_residual(x, residual, i, mask_info)
residual = x
x = x.transpose(1, 2)
x = self.proj(x)
return x
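# --- Illustrative sketch (not part of the original file) ---------------------
# Decoder1d keeps the sequence length and returns features in the encoder
# dimension: (B, T, C_in) -> grouped conv stack over time at decoder_dim ->
# linear projection back to C_in.  The hypothetical helper below is a shape
# check with the default D2vDecoderConfig; it is not called by the model.
def _example_decoder1d_shapes():
    cfg = D2vDecoderConfig()              # decoder_dim=384, 5 conv layers
    dec = Decoder1d(cfg, input_dim=768)
    x = torch.randn(2, 50, 768)           # B, T, C
    out = dec(x, mask_info=None)          # mask_info is unused by the residual path
    assert out.shape == (2, 50, 768)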
class Decoder2d(DecoderBase):
def __init__(self, cfg: D2vDecoderConfig, input_dim, h_size, w_size):
super().__init__(cfg)
self.h_size = h_size
self.w_size = w_size
def make_block(in_dim):
block = [
nn.Conv2d(
in_dim,
cfg.decoder_dim,
kernel_size=cfg.decoder_kernel,
padding=cfg.decoder_kernel // 2,
groups=cfg.decoder_groups,
),
SamePad2d(cfg.decoder_kernel),
TransposeLast(tranpose_dim=-3),
LayerNorm(cfg.decoder_dim, elementwise_affine=False),
TransposeLast(tranpose_dim=-3),
nn.GELU(),
]
return nn.Sequential(*block)
self.blocks = nn.Sequential(
*[
make_block(input_dim if i == 0 else cfg.decoder_dim)
for i in range(cfg.decoder_layers)
]
)
self.proj = nn.Linear(cfg.decoder_dim, input_dim)
def forward(self, x, mask_info):
B, T, C = x.shape
x = x.transpose(1, 2).reshape(B, C, self.h_size, self.w_size)
residual = x
for i, layer in enumerate(self.blocks):
x = layer(x)
x = self.add_residual(x, residual, i, mask_info)
residual = x
x = x.reshape(B, -1, T).transpose(1, 2)
x = self.proj(x)
return x
class TransformerDecoder(nn.Module):
decoder_cfg: D2vDecoderConfig
def __init__(self, cfg: D2vDecoderConfig, input_dim, encoder):
super().__init__()
self.decoder_cfg = cfg
self.input_proj = nn.Linear(input_dim, cfg.decoder_dim)
self.encoder = encoder
self.proj = nn.Linear(cfg.decoder_dim, input_dim)
def reset_parameters(self):
from fairseq.modules.transformer_sentence_encoder import init_bert_params
self.apply(init_bert_params)
def forward(self, x, mask_info):
x = self.input_proj(x)
x = self.encoder(x, None, None, 1)
x = self.proj(x)
return x
class AltBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
mlp_drop=0.0,
post_mlp_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
layer_norm_first=True,
ffn_targets=False,
cosine_attention=False,
):
super().__init__()
self.layer_norm_first = layer_norm_first
self.ffn_targets = ffn_targets
from timm.models.vision_transformer import DropPath, Mlp
self.norm1 = norm_layer(dim)
self.attn = AltAttention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
cosine_attention=cosine_attention,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=mlp_drop,
)
self.post_mlp_dropout = nn.Dropout(post_mlp_drop, inplace=False)
def forward(self, x, padding_mask=None, alibi_bias=None):
if self.layer_norm_first:
x = x + self.drop_path(self.attn(self.norm1(x), padding_mask, alibi_bias))
r = x = self.mlp(self.norm2(x))
t = x
x = r + self.drop_path(self.post_mlp_dropout(x))
if not self.ffn_targets:
t = x
else:
x = x + self.drop_path(self.attn(x, padding_mask, alibi_bias))
r = x = self.norm1(x)
x = self.mlp(x)
t = x
x = self.norm2(r + self.drop_path(self.post_mlp_dropout(x)))
if not self.ffn_targets:
t = x
return x, t
class AltAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
cosine_attention=False,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.cosine_attention = cosine_attention
if cosine_attention:
self.logit_scale = nn.Parameter(
torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True
)
def forward(self, x, padding_mask=None, alibi_bias=None):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4) # qkv x B x H x L x D
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
dtype = q.dtype
if self.cosine_attention:
# cosine attention
attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
logit_scale = torch.clamp(
self.logit_scale, max=torch.log(torch.tensor(1.0 / 0.01))
).exp()
attn = attn * logit_scale
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if alibi_bias is not None:
attn = attn.type_as(alibi_bias)
attn[:, : alibi_bias.size(1)] += alibi_bias
if padding_mask is not None and padding_mask.any():
attn = attn.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn = attn.softmax(dim=-1, dtype=torch.float32).to(dtype=dtype)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2) #
x = x.reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class EncDecAttention(nn.Module):
def __init__(
self,
q_dim,
kv_dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
cosine_attention=False,
):
super().__init__()
self.num_heads = num_heads
head_dim = q_dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q_proj = nn.Linear(q_dim, q_dim, bias=qkv_bias)
self.kv_proj = nn.Linear(kv_dim, 2 * q_dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(q_dim, q_dim)
self.proj_drop = nn.Dropout(proj_drop)
self.cosine_attention = cosine_attention
if cosine_attention:
self.logit_scale = nn.Parameter(
torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True
)
def forward(self, q, kv, padding_mask=None, alibi_bias=None):
B, N, C = q.shape
q = (
self.q_proj(q)
.reshape(B, N, self.num_heads, C // self.num_heads)
.permute(0, 2, 1, 3)
) # B x H x L x D
kv = (
self.kv_proj(kv)
.reshape(B, -1, 2, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
) # kv x B x H x L x D
k, v = (
kv[0],
kv[1],
) # make torchscript happy (cannot use tensor as tuple)
dtype = q.dtype
if self.cosine_attention:
# cosine attention
attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
logit_scale = torch.clamp(
self.logit_scale, max=torch.log(torch.tensor(1.0 / 0.01))
).exp()
attn = attn * logit_scale
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if alibi_bias is not None:
attn = attn.type_as(alibi_bias)
attn[:, : alibi_bias.size(1)] += alibi_bias
if padding_mask is not None and padding_mask.any():
attn = attn.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn = attn.softmax(dim=-1, dtype=torch.float32).to(dtype=dtype)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2) #
x = x.reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
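# --- Illustrative sketch (not part of the original file) ---------------------
# EncDecAttention is cross-attention: queries come from one stream (e.g. the
# masked-position queries produced for the decoder) and keys/values from
# another (the visible encoder tokens), so q_dim and kv_dim may differ.  The
# helper below is a hypothetical shape check and is not called by the model.
def _example_enc_dec_attention_shapes():
    attn = EncDecAttention(q_dim=64, kv_dim=96, num_heads=4)
    q = torch.randn(2, 5, 64)             # B x N_q x q_dim
    kv = torch.randn(2, 20, 96)           # B x N_kv x kv_dim
    out = attn(q, kv)
    assert out.shape == (2, 5, 64)        # output stays in the query dimension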
class EncDecBlock(nn.Module):
def __init__(
self,
q_dim,
kv_dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
mlp_drop=0.0,
post_mlp_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
layer_norm_first=True,
cosine_attention=False,
first_residual=True,
):
super().__init__()
self.layer_norm_first = layer_norm_first
from timm.models.vision_transformer import DropPath, Mlp
self.norm1 = norm_layer(q_dim)
self.attn = EncDecAttention(
q_dim,
kv_dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
cosine_attention=cosine_attention,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(q_dim)
mlp_hidden_dim = int(q_dim * mlp_ratio)
self.mlp = Mlp(
in_features=q_dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=mlp_drop,
)
self.post_mlp_dropout = nn.Dropout(post_mlp_drop, inplace=False)
self.first_residual = first_residual
def forward(self, q, kv, padding_mask=None, alibi_bias=None):
r = q if self.first_residual else 0
if self.layer_norm_first:
x = r + self.drop_path(
self.attn(self.norm1(q), kv, padding_mask, alibi_bias)
)
r = x = self.mlp(self.norm2(x))
x = r + self.drop_path(self.post_mlp_dropout(x))
else:
x = r + self.drop_path(self.attn(q, kv, padding_mask, alibi_bias))
r = x = self.norm1(x)
x = self.mlp(x)
x = self.norm2(r + self.drop_path(self.post_mlp_dropout(x)))
return x
class EncDecTransformerDecoder(nn.Module):
def __init__(self, cfg: D2vDecoderConfig, input_dim):
super().__init__()
self.input_proj = nn.Linear(input_dim, cfg.decoder_dim)
self.blocks = nn.Sequential(
*[
EncDecBlock(
q_dim=cfg.decoder_dim,
kv_dim=input_dim,
num_heads=8,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
mlp_drop=0.0,
post_mlp_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
layer_norm_first=False,
cosine_attention=False,
first_residual=i > 0,
)
for i in range(cfg.decoder_layers)
]
)
self.proj = nn.Linear(cfg.decoder_dim, input_dim)
def reset_parameters(self):
from fairseq.modules.transformer_sentence_encoder import init_bert_params
self.apply(init_bert_params)
def forward(self, x, kv):
x = self.input_proj(x)
for i, layer in enumerate(self.blocks):
x = layer(x, kv)
x = self.proj(x)
return x
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/modalities/modules.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import namedtuple
from dataclasses import dataclass
from functools import partial
from omegaconf import MISSING, II
from typing import Optional, Callable
from fairseq.data.data_utils import compute_mask_indices
from fairseq.modules import GradMultiply
from fairseq.utils import index_put
from examples.data2vec.data.modality import Modality
from .modules import D2vDecoderConfig
logger = logging.getLogger(__name__)
@dataclass
class D2vModalityConfig:
type: Modality = MISSING
prenet_depth: int = 4
prenet_layerdrop: float = 0
prenet_dropout: float = 0
start_drop_path_rate: float = 0
end_drop_path_rate: float = 0
num_extra_tokens: int = 0
init_extra_token_zero: bool = True
mask_noise_std: float = 0.01
mask_prob_min: Optional[float] = None
mask_prob: float = 0.7
inverse_mask: bool = False
mask_prob_adjust: float = 0
keep_masked_pct: float = 0
mask_length: int = 5
add_masks: bool = False
remove_masks: bool = False
mask_dropout: float = 0.0
encoder_zero_mask: bool = True
mask_channel_prob: float = 0.0
mask_channel_length: int = 64
ema_local_encoder: bool = False # used in data2vec_multi
local_grad_mult: float = 1.0
use_alibi_encoder: bool = False
alibi_scale: float = 1.0
learned_alibi: bool = False
alibi_max_pos: Optional[int] = None
learned_alibi_scale: bool = False
learned_alibi_scale_per_head: bool = False
learned_alibi_scale_per_layer: bool = False
num_alibi_heads: int = II("model.num_heads")
model_depth: int = II("model.depth")
decoder: Optional[D2vDecoderConfig] = D2vDecoderConfig()
MaskSeed = namedtuple("MaskSeed", ["seed", "update", "ids"])
MaskInfo = namedtuple("MaskInfo", ["x_unmasked", "mask", "ids_restore", "ids_keep"])
class ModalitySpecificEncoder(nn.Module):
def __init__(
self,
modality_cfg: D2vModalityConfig,
embed_dim: int,
local_encoder: nn.Module,
project_features: nn.Module,
fixed_positional_encoder: Optional[nn.Module],
relative_positional_encoder: Optional[nn.Module],
context_encoder: nn.Module,
decoder: nn.Module,
get_alibi_bias: Optional[Callable[[int, int, str, str], torch.Tensor]],
):
super().__init__()
self.modality_cfg = modality_cfg
self.local_encoder = local_encoder
self.project_features = project_features
self.fixed_positional_encoder = fixed_positional_encoder
self.relative_positional_encoder = relative_positional_encoder
self.context_encoder = context_encoder
self.decoder = decoder
self.get_alibi_bias = get_alibi_bias if modality_cfg.use_alibi_encoder else None
self.local_grad_mult = self.modality_cfg.local_grad_mult
self.extra_tokens = None
if modality_cfg.num_extra_tokens > 0:
self.extra_tokens = nn.Parameter(
torch.zeros(1, modality_cfg.num_extra_tokens, embed_dim)
)
if not modality_cfg.init_extra_token_zero:
nn.init.normal_(self.extra_tokens)
elif self.extra_tokens.size(1) > 1:
nn.init.normal_(self.extra_tokens[:, 1:])
self.alibi_scale = None
if self.get_alibi_bias is not None:
self.alibi_scale = nn.Parameter(
torch.full(
(
(modality_cfg.prenet_depth + modality_cfg.model_depth)
if modality_cfg.learned_alibi_scale_per_layer
else 1,
1,
self.modality_cfg.num_alibi_heads
if modality_cfg.learned_alibi_scale_per_head
else 1,
1,
1,
),
modality_cfg.alibi_scale,
dtype=torch.float,
),
requires_grad=modality_cfg.learned_alibi_scale,
)
if modality_cfg.learned_alibi and self.get_alibi_bias is not None:
assert modality_cfg.alibi_max_pos is not None
alibi_bias = self.get_alibi_bias(
batch_size=1,
time_steps=modality_cfg.alibi_max_pos,
heads=modality_cfg.num_alibi_heads,
scale=1.0,
dtype=torch.float,
device="cpu",
)
self.alibi_bias = nn.Parameter(alibi_bias)
self.get_alibi_bias = partial(
_learned_alibi_bias, alibi_bias=self.alibi_bias
)
def upgrade_state_dict_named(self, state_dict, name):
k = f"{name}.alibi_scale"
if k in state_dict and state_dict[k].dim() == 4:
state_dict[k] = state_dict[k].unsqueeze(0)
return state_dict
def convert_padding_mask(self, x, padding_mask):
return padding_mask
def decoder_input(self, x, mask_info: MaskInfo):
inp_drop = self.modality_cfg.decoder.input_dropout
if inp_drop > 0:
x = F.dropout(x, inp_drop, training=self.training, inplace=True)
num_extra = self.modality_cfg.num_extra_tokens
if mask_info is not None:
num_masked = mask_info.ids_restore.shape[1] - x.shape[1] + num_extra
mask_tokens = x.new_empty(
x.size(0),
num_masked,
x.size(-1),
).normal_(0, self.modality_cfg.mask_noise_std)
x_ = torch.cat([x[:, num_extra:], mask_tokens], dim=1)
x = torch.gather(x_, dim=1, index=mask_info.ids_restore)
if self.modality_cfg.decoder.add_positions_masked:
assert self.fixed_positional_encoder is not None
pos = self.fixed_positional_encoder(x, None)
x = x + (pos * mask_info.mask.unsqueeze(-1))
else:
x = x[:, num_extra:]
if self.modality_cfg.decoder.add_positions_all:
assert self.fixed_positional_encoder is not None
x = x + self.fixed_positional_encoder(x, None)
return x, mask_info
def local_features(self, features):
if self.local_grad_mult > 0:
if self.local_grad_mult == 1.0:
x = self.local_encoder(features)
else:
x = GradMultiply.apply(
self.local_encoder(features), self.local_grad_mult
)
else:
with torch.no_grad():
x = self.local_encoder(features)
x = self.project_features(x)
return x
def contextualized_features(
self,
x,
padding_mask,
mask,
remove_masked,
clone_batch: int = 1,
mask_seeds: Optional[torch.Tensor] = None,
precomputed_mask=None,
):
if padding_mask is not None:
padding_mask = self.convert_padding_mask(x, padding_mask)
local_features = x
if mask and clone_batch == 1:
local_features = local_features.clone()
orig_B, orig_T, _ = x.shape
pre_mask_B = orig_B
mask_info = None
x_pos = None
if self.fixed_positional_encoder is not None:
x = x + self.fixed_positional_encoder(x, padding_mask)
if mask:
if clone_batch > 1:
x = x.repeat_interleave(clone_batch, 0)
if mask_seeds is not None:
clone_hash = [
int(hash((mask_seeds.seed, ind)) % 1e10)
for ind in range(clone_batch - 1)
]
clone_hash = torch.tensor([0] + clone_hash).long().view(1, -1)
id = mask_seeds.ids
id = id.repeat_interleave(clone_batch, 0)
id = id.view(-1, clone_batch) + clone_hash.to(id)
id = id.view(-1)
mask_seeds = MaskSeed(
seed=mask_seeds.seed, update=mask_seeds.update, ids=id
)
if padding_mask is not None:
padding_mask = padding_mask.repeat_interleave(clone_batch, 0)
x, mask_info = self.compute_mask(
x,
padding_mask,
mask_seed=mask_seeds,
apply=self.relative_positional_encoder is not None or not remove_masked,
precomputed_mask=precomputed_mask,
)
if self.relative_positional_encoder is not None:
x_pos = self.relative_positional_encoder(x)
masked_padding_mask = padding_mask
if mask and remove_masked:
x = mask_info.x_unmasked
if x_pos is not None:
x = x + gather_unmasked(x_pos, mask_info)
if padding_mask is not None and padding_mask.any():
masked_padding_mask = gather_unmasked_mask(padding_mask, mask_info)
if not masked_padding_mask.any():
masked_padding_mask = None
else:
masked_padding_mask = None
elif x_pos is not None:
x = x + x_pos
alibi_bias = None
alibi_scale = self.alibi_scale
if self.get_alibi_bias is not None:
alibi_bias = self.get_alibi_bias(
batch_size=pre_mask_B,
time_steps=orig_T,
heads=self.modality_cfg.num_alibi_heads,
dtype=torch.float32,
device=x.device,
)
if alibi_scale is not None:
alibi_scale = alibi_scale.clamp_min(0)
if alibi_scale.size(0) == 1:
alibi_bias = alibi_bias * alibi_scale.squeeze(0).type_as(alibi_bias)
alibi_scale = None
if clone_batch > 1:
alibi_bias = alibi_bias.repeat_interleave(clone_batch, 0)
if mask_info is not None and remove_masked:
alibi_bias = masked_alibi(alibi_bias, mask_info)
if self.extra_tokens is not None:
num = self.extra_tokens.size(1)
x = torch.cat([self.extra_tokens.expand(x.size(0), -1, -1), x], dim=1)
if masked_padding_mask is not None:
# B x T
masked_padding_mask = F.pad(masked_padding_mask, (num, 0))
if alibi_bias is not None:
# B x H x T x T
alibi_bias = F.pad(alibi_bias, (num, 0, num, 0))
x = self.context_encoder(
x,
masked_padding_mask,
alibi_bias,
alibi_scale[: self.modality_cfg.prenet_depth]
if alibi_scale is not None
else None,
)
return {
"x": x,
"local_features": local_features,
"padding_mask": masked_padding_mask,
"alibi_bias": alibi_bias,
"alibi_scale": alibi_scale[self.modality_cfg.prenet_depth :]
if alibi_scale is not None and alibi_scale.size(0) > 1
else alibi_scale,
"encoder_mask": mask_info,
}
def forward(
self,
features,
padding_mask,
mask: bool,
remove_masked: bool,
clone_batch: int = 1,
mask_seeds: Optional[torch.Tensor] = None,
precomputed_mask=None,
):
x = self.local_features(features)
return self.contextualized_features(
x,
padding_mask,
mask,
remove_masked,
clone_batch,
mask_seeds,
precomputed_mask,
)
def reset_parameters(self):
pass
def compute_mask(
self,
x,
padding_mask,
mask_seed: Optional[MaskSeed],
apply,
precomputed_mask,
):
if precomputed_mask is not None:
mask = precomputed_mask
mask_info = self.make_maskinfo(x, mask)
else:
B, T, C = x.shape
cfg = self.modality_cfg
mask_prob = cfg.mask_prob
if (
cfg.mask_prob_min is not None
and cfg.mask_prob_min >= 0
and cfg.mask_prob_min < mask_prob
):
mask_prob = np.random.uniform(cfg.mask_prob_min, mask_prob)
if mask_prob > 0:
if cfg.mask_length == 1:
mask_info = random_masking(x, mask_prob, mask_seed)
else:
if self.modality_cfg.inverse_mask:
mask_prob = 1 - mask_prob
mask = compute_mask_indices(
(B, T),
padding_mask,
mask_prob,
cfg.mask_length,
min_masks=1,
require_same_masks=True,
mask_dropout=cfg.mask_dropout,
add_masks=cfg.add_masks,
seed=mask_seed.seed if mask_seed is not None else None,
epoch=mask_seed.update if mask_seed is not None else None,
indices=mask_seed.ids if mask_seed is not None else None,
)
mask = torch.from_numpy(mask).to(device=x.device)
if self.modality_cfg.inverse_mask:
mask = 1 - mask
mask_info = self.make_maskinfo(x, mask)
else:
mask_info = None
if apply:
x = self.apply_mask(x, mask_info)
return x, mask_info
def make_maskinfo(self, x, mask, shape=None):
if shape is None:
B, T, D = x.shape
else:
B, T, D = shape
mask = mask.to(torch.uint8)
ids_shuffle = mask.argsort(dim=1)
ids_restore = ids_shuffle.argsort(dim=1).unsqueeze(-1).expand(-1, -1, D)
len_keep = T - mask[0].sum()
if self.modality_cfg.keep_masked_pct > 0:
len_keep += round((T - int(len_keep)) * self.modality_cfg.keep_masked_pct)
ids_keep = ids_shuffle[:, :len_keep]
if shape is not None:
x_unmasked = None
else:
ids_keep = ids_keep.unsqueeze(-1).expand(-1, -1, D)
x_unmasked = torch.gather(x, dim=1, index=ids_keep)
mask_info = MaskInfo(
x_unmasked=x_unmasked,
mask=mask,
ids_restore=ids_restore,
ids_keep=ids_keep,
)
return mask_info
def apply_mask(self, x, mask_info):
cfg = self.modality_cfg
B, T, C = x.shape
if mask_info is not None:
mask = mask_info.mask
if cfg.encoder_zero_mask:
x = x * (1 - mask.type_as(x).unsqueeze(-1))
else:
num_masks = mask.sum().item()
masks = x.new_empty(num_masks, x.size(-1)).normal_(
0, cfg.mask_noise_std
)
x = index_put(x, mask, masks)
if cfg.mask_channel_prob > 0:
mask_channel = compute_mask_indices(
(B, C),
None,
cfg.mask_channel_prob,
cfg.mask_channel_length,
)
mask_channel = (
torch.from_numpy(mask_channel)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel, 0)
return x
def remove_pretraining_modules(self, keep_decoder=False):
if not keep_decoder:
self.decoder = None
def get_annealed_rate(start, end, curr_step, total_steps):
if curr_step >= total_steps:
return end
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining
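# Editor's sketch (not in the original file): a minimal illustration of
# get_annealed_rate, which linearly interpolates from `start` to `end` over
# `total_steps` and then holds `end`. The _demo_* helper below is hypothetical
# and is never called by the module.
def _demo_get_annealed_rate():
    assert get_annealed_rate(1.0, 0.0, curr_step=0, total_steps=1000) == 1.0
    assert get_annealed_rate(1.0, 0.0, curr_step=250, total_steps=1000) == 0.75
    assert get_annealed_rate(1.0, 0.0, curr_step=2000, total_steps=1000) == 0.0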
# adapted from MAE
def random_masking(x, mask_ratio, mask_seed: Optional[MaskSeed]):
N, L, D = x.shape # batch, length, dim
len_keep = int(L * (1 - mask_ratio))
generator = None
if mask_seed is not None:
seed = int(
hash((mask_seed.seed, mask_seed.update, mask_seed.ids.sum().item())) % 1e6
)
generator = torch.Generator(device=x.device)
generator.manual_seed(seed)
noise = torch.rand(N, L, generator=generator, device=x.device) # noise in [0, 1]
# sort noise for each sample
ids_shuffle = noise.argsort(dim=1) # ascend: small is keep, large is remove
ids_restore = ids_shuffle.argsort(dim=1)
# keep the first subset
ids_keep = ids_shuffle[:, :len_keep]
ids_keep = ids_keep.unsqueeze(-1).expand(-1, -1, D)
x_unmasked = torch.gather(x, dim=1, index=ids_keep)
# generate the binary mask: 0 is keep, 1 is remove
mask = torch.ones([N, L], dtype=x.dtype, device=x.device)
mask[:, :len_keep] = 0
# unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
ids_restore = ids_restore.unsqueeze(-1).expand(-1, -1, D)
return MaskInfo(
x_unmasked=x_unmasked, mask=mask, ids_restore=ids_restore, ids_keep=ids_keep
)
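# Editor's sketch (not in the original file): random_masking on a toy batch.
# With mask_ratio=0.5 and L=8, four positions per sample are kept; `mask` is 1 at
# removed positions and `ids_restore` maps the shuffled order back to the original.
# The _demo_* helper is hypothetical and is never called by the module.
def _demo_random_masking():
    x = torch.arange(2 * 8 * 4, dtype=torch.float).view(2, 8, 4)  # B=2, L=8, D=4
    info = random_masking(x, mask_ratio=0.5, mask_seed=None)
    assert info.x_unmasked.shape == (2, 4, 4)  # only the kept positions remain
    assert info.mask.shape == (2, 8)
    assert info.mask.sum(dim=1).eq(4).all()  # 4 masked positions per sample
    assert info.ids_restore.shape == (2, 8, 4)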
def gather_unmasked(x: torch.Tensor, mask_info: MaskInfo) -> torch.Tensor:
return torch.gather(
x,
dim=1,
index=mask_info.ids_keep,
)
def gather_unmasked_mask(x: torch.Tensor, mask_info: MaskInfo) -> torch.Tensor:
return torch.gather(
x,
dim=1,
index=mask_info.ids_keep[..., 0], # ignore the feature dimension
)
def get_alibi(
max_positions: int,
attention_heads: int,
dims: int = 1,
distance: str = "manhattan",
):
def get_slopes(n):
def get_slopes_power_of_2(n):
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
# In the paper, we only train models that have 2^a heads for some
# a. This function has some good properties that only occur when
# the input is a power of 2. To maintain that even when the number
# of heads is not a power of 2, we use this workaround.
if math.log2(n).is_integer():
return get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
return (
get_slopes_power_of_2(closest_power_of_2)
+ get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
)
maxpos = max_positions
attn_heads = attention_heads
slopes = torch.Tensor(get_slopes(attn_heads))
if dims == 1:
        # prepare alibi position linear bias. Note that wav2vec2 is a
        # non-autoregressive model, so we want a symmetric mask with 0 on the
        # diagonal and otherwise linearly decreasing values
pos_bias = (
torch.abs(
torch.arange(maxpos).unsqueeze(0) - torch.arange(maxpos).unsqueeze(1)
)
* -1
)
elif dims == 2:
if distance == "manhattan":
df = lambda x1, y1, x2, y2: abs(x1 - x2) + abs(y1 - y2)
elif distance == "euclidean":
df = lambda x1, y1, x2, y2: math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
n = math.sqrt(max_positions)
assert n.is_integer(), n
n = int(n)
pos_bias = torch.zeros((max_positions, max_positions))
for i in range(n):
for j in range(n):
for k in range(n):
for l in range(n):
new_x = i * n + j
new_y = k * n + l
pos_bias[new_x, new_y] = -df(i, j, k, l)
else:
raise Exception(f"unsupported number of alibi dims: {dims}")
alibi_bias = slopes.unsqueeze(1).unsqueeze(1) * pos_bias.unsqueeze(0).expand(
attn_heads, -1, -1
)
return alibi_bias
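# Editor's sketch (not in the original file): get_alibi returns one bias matrix per
# attention head; for dims=1 the bias is symmetric with zeros on the diagonal and
# values that decrease linearly with distance, scaled by a per-head slope.
# The _demo_* helper is hypothetical and is never called by the module.
def _demo_get_alibi():
    bias = get_alibi(max_positions=6, attention_heads=4, dims=1)
    assert bias.shape == (4, 6, 6)
    assert torch.equal(bias, bias.transpose(-1, -2))  # symmetric in time
    assert torch.all(bias.diagonal(dim1=-2, dim2=-1) == 0)  # zero on the diagonal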
def get_alibi_bias(
alibi_biases,
batch_size,
time_steps,
heads,
dtype,
device,
dims=1,
distance="manhattan",
):
cache_key = f"{dims}_{heads}_{distance}"
buffered = alibi_biases.get(cache_key, None)
target_size = heads * batch_size
if (
buffered is None
or buffered.size(0) < target_size
or buffered.size(1) < time_steps
or buffered.dtype != dtype
or buffered.device != device
):
bt = max(time_steps, buffered.size(1) if buffered is not None else 0)
bn = max(target_size, buffered.size(0) if buffered is not None else 0) // heads
buffered = (
get_alibi(bt, heads, dims=dims, distance=distance)
.to(dtype=dtype, device=device)
.repeat(bn, 1, 1)
)
alibi_biases[cache_key] = buffered
b = buffered[:target_size, :time_steps, :time_steps]
b = b.view(batch_size, heads, time_steps, time_steps)
return b
def _learned_alibi_bias(
alibi_bias,
batch_size,
time_steps,
heads,
scale,
dtype,
device,
):
assert alibi_bias.size(1) == heads, alibi_bias.shape
assert alibi_bias.dtype == dtype, alibi_bias.dtype
assert alibi_bias.device == device, alibi_bias.device
if alibi_bias.size(-1) < time_steps:
psz = math.ceil((time_steps - alibi_bias.size(-1)) / 2)
alibi_bias = F.pad(alibi_bias, (psz, psz, psz, psz), mode="replicate")
alibi_bias = alibi_bias.expand(batch_size, -1, -1, -1) * scale
return alibi_bias[..., :time_steps, :time_steps]
def masked_alibi(alibi_bias, mask_info):
H = alibi_bias.size(1)
orig_bias = alibi_bias
index = mask_info.ids_keep.unsqueeze(1)[..., 0].unsqueeze(-1)
alibi_bias = torch.gather(
orig_bias,
dim=-2,
index=index.expand(-1, H, -1, mask_info.ids_restore.size(1)),
)
alibi_bias = torch.gather(
alibi_bias,
dim=-1,
index=index.transpose(-1, -2).expand(-1, H, alibi_bias.size(-2), -1),
)
return alibi_bias
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/models/modalities/base.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
def get_parser():
parser = argparse.ArgumentParser(description="convert audioset labels")
# fmt: off
parser.add_argument('in_file', help='audioset csv file to convert')
parser.add_argument('--manifest', required=True, metavar='PATH', help='wav2vec-like manifest')
parser.add_argument('--descriptors', required=True, metavar='PATH', help='path to label descriptor file')
parser.add_argument('--output', required=True, metavar='PATH', help='where to output converted labels')
# fmt: on
return parser
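# Editor's note (not in the original script): a hypothetical invocation with
# illustrative file names only. Per the parsing code below, the descriptor csv maps
# a label id (column 1) to an index (column 0), and the audioset csv lists
# "id, start, end, labels..." rows:
#   python convert_audioset_labels.py segments.csv \
#       --manifest train.tsv --descriptors label_descriptors.csv --output labels.lbl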
def main():
parser = get_parser()
args = parser.parse_args()
label_descriptors = {}
with open(args.descriptors, "r") as ldf:
next(ldf)
for line in ldf:
if line.strip() == "":
continue
items = line.split(",")
assert len(items) > 2, line
idx = items[0]
lbl = items[1]
assert lbl not in label_descriptors, lbl
label_descriptors[lbl] = idx
labels = {}
with open(args.in_file, "r") as ifd:
for line in ifd:
if line.lstrip().startswith("#"):
continue
items = line.rstrip().split(",")
id = items[0].strip()
start = items[1].strip()
end = items[2].strip()
lbls = [label_descriptors[it.strip(' "')] for it in items[3:]]
labels[id] = [start, end, ",".join(lbls)]
with open(args.manifest, "r") as mf, open(args.output, "w") as of:
next(mf)
for line in mf:
path, _ = line.split("\t")
id = os.path.splitext(os.path.basename(path))[0]
lbl = labels[id]
print("\t".join(lbl), file=of)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/scripts/convert_audioset_labels.py |
from valids import parser, main as valids_main
import os.path as osp
args = parser.parse_args()
args.target = "valid_accuracy"
args.best_biggest = True
args.best = True
args.last = 0
args.path_contains = None
res = valids_main(args, print_output=False)
grouped = {}
for k, v in res.items():
k = osp.dirname(k)
run = osp.dirname(k)
task = osp.basename(k)
val = v["valid_accuracy"]
if run not in grouped:
grouped[run] = {}
grouped[run][task] = val
for run, tasks in grouped.items():
print(run)
avg = sum(float(v) for v in tasks.values()) / len(tasks)
avg_norte = sum(float(v) for k,v in tasks.items() if k != 'rte') / (len(tasks) -1)
try:
print(f"{tasks['cola']}\t{tasks['qnli']}\t{tasks['mrpc']}\t{tasks['rte']}\t{tasks['sst_2']}\t{avg:.2f}\t{avg_norte:.2f}")
except:
print(tasks)
print()
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/scripts/text/glue.py |
import os, argparse, re, json, copy, math
from collections import OrderedDict
import numpy as np
parser = argparse.ArgumentParser(description='Scan training logs under a base path and summarize a target metric.')
parser.add_argument('base', help='base log path')
parser.add_argument('--file_name', default='train.log', help='the log file name')
parser.add_argument('--target', default='valid_loss', help='target metric')
parser.add_argument('--last', type=int, default=999999999, help='print last n matches')
parser.add_argument('--last_files', type=int, default=None, help='print last x files')
parser.add_argument('--everything', action='store_true', help='print everything instead of only last match')
parser.add_argument('--path_contains', help='only consider matching file pattern')
parser.add_argument('--group_on', help='if set, groups by this metric and shows table of differences')
parser.add_argument('--epoch', help='epoch for comparison', type=int)
parser.add_argument('--skip_empty', action='store_true', help='skip empty results')
parser.add_argument('--skip_containing', help='skips entries containing this attribute')
parser.add_argument('--unique_epochs', action='store_true', help='only consider the last line for each epoch')
parser.add_argument('--best', action='store_true', help='print the last best result')
parser.add_argument('--avg_params', help='average these params through entire log')
parser.add_argument('--extract_prev', help='extracts this metric from previous line')
parser.add_argument('--remove_metric', help='remove this key from the grouping entry and append its value to the group label')
parser.add_argument('--compact', action='store_true', help='if true, just prints checkpoint <tab> best val')
parser.add_argument('--hydra', action='store_true', help='if true, uses hydra param conventions')
parser.add_argument('--best_biggest', action='store_true', help='if true, best is the biggest number, not smallest')
parser.add_argument('--key_len', type=int, default=10, help='max length of key')
parser.add_argument('--best_only', action='store_true', help='if set, only prints the best value')
parser.add_argument('--flat', action='store_true', help='just print the best results')
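# Editor's note (not in the original script): a hypothetical invocation with an
# illustrative path — scan a tree of hydra runs and print the best (largest)
# valid_accuracy found in each train.log:
#   python valids.py /path/to/runs --hydra --target valid_accuracy --best --best_biggest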
def main(args, print_output):
ret = {}
entries = []
def extract_metric(s, metric):
try:
j = json.loads(s)
except:
return None
if args.epoch is not None and ('epoch' not in j or j['epoch'] != args.epoch):
return None
return j[metric] if metric in j else None
def extract_params(s):
s = s.replace(args.base, '', 1)
if args.path_contains is not None:
s = s.replace(args.path_contains, '', 1)
if args.hydra:
num_matches = re.findall(r'(?:/|__)([^/:]+):(\d+\.?\d*)', s)
# str_matches = re.findall(r'(?:/|__)([^/:]+):([^\.]*[^\d\.]+)(?:/|__)', s)
str_matches = re.findall(r'(?:/|__)?((?:(?!(?:\:|__)).)+):([^\.]*[^\d\.]+\d*)(?:/|__)', s)
lr_matches = re.findall(r'optimization.(lr):\[([\d\.,]+)\]', s)
task_matches = re.findall(r'.*/(\d+)$', s)
else:
num_matches = re.findall(r'\.?([^\.]+?)(\d+(e\-\d+)?(?:\.\d+)?)(\.|$)', s)
str_matches = re.findall(r'[/\.]([^\.]*[^\d\.]+\d*)(?=\.)', s)
lr_matches = []
task_matches = []
cp_matches = re.findall(r'checkpoint(?:_\d+)?_(\d+).pt', s)
items = OrderedDict()
for m in str_matches:
if isinstance(m, tuple):
if 'checkpoint' not in m[0]:
items[m[0]] = m[1]
else:
items[m] = ''
for m in num_matches:
items[m[0]] = m[1]
for m in lr_matches:
items[m[0]] = m[1]
for m in task_matches:
items["hydra_task"] = m
for m in cp_matches:
items['checkpoint'] = m
return items
abs_best = None
sources = []
for root, _, files in os.walk(args.base):
if args.path_contains is not None and not args.path_contains in root:
continue
for f in files:
if f.endswith(args.file_name):
sources.append((root, f))
if args.last_files is not None:
sources = sources[-args.last_files:]
for root, file in sources:
with open(os.path.join(root, file), 'r') as fin:
found = []
avg = {}
prev = None
for line in fin:
line = line.rstrip()
if line.find(args.target) != -1 and (
args.skip_containing is None or line.find(args.skip_containing) == -1):
try:
idx = line.index("{")
line = line[idx:]
line_json = json.loads(line)
except:
continue
if prev is not None:
try:
prev.update(line_json)
line_json = prev
except:
pass
if args.target in line_json:
found.append(line_json)
if args.avg_params:
avg_params = args.avg_params.split(',')
for p in avg_params:
m = extract_metric(line, p)
if m is not None:
prev_v, prev_c = avg.get(p, (0, 0))
avg[p] = prev_v + float(m), prev_c + 1
if args.extract_prev:
try:
prev = json.loads(line)
except:
pass
best = None
if args.best:
curr_best = None
for i in range(len(found)):
cand_best = found[i][args.target] if args.target in found[i] else None
def cmp(a, b):
a = float(a)
b = float(b)
if args.best_biggest:
return a > b
return a < b
if cand_best is not None and not math.isnan(float(cand_best)) and (
curr_best is None or cmp(cand_best, curr_best)):
curr_best = cand_best
if abs_best is None or cmp(curr_best, abs_best):
abs_best = curr_best
best = found[i]
if args.unique_epochs or args.epoch:
last_found = []
last_epoch = None
for i in reversed(range(len(found))):
epoch = found[i]['epoch']
if args.epoch and args.epoch != epoch:
continue
if epoch != last_epoch:
last_epoch = epoch
last_found.append(found[i])
found = list(reversed(last_found))
if len(found) == 0:
if print_output and (args.last_files is not None or not args.skip_empty):
# print(root.split('/')[-1])
print(root[len(args.base):])
print('Nothing')
else:
if not print_output:
ret[root[len(args.base):]] = best
continue
if args.compact:
# print('{}\t{}'.format(root.split('/')[-1], curr_best))
print('{}\t{}'.format(root[len(args.base)+1:], curr_best))
continue
if args.group_on is None and not args.best_only:
# print(root.split('/')[-1])
print(root[len(args.base):])
if not args.everything:
if best is not None and args.group_on is None and not args.best_only and not args.flat:
print(best, '(best)')
if args.group_on is None and args.last and not args.best_only and not args.flat:
for f in found[-args.last:]:
if args.extract_prev is not None:
try:
print('{}\t{}'.format(f[args.extract_prev], f[args.target]))
except Exception as e:
print('Exception!', e)
else:
print(f)
try:
metric = found[-1][args.target] if not args.best or best is None else best[args.target]
except:
print(found[-1])
raise
if metric is not None:
entries.append((extract_params(root), metric))
else:
for f in found:
print(f)
if not args.group_on and print_output:
print()
if len(avg) > 0:
for k, (v, c) in avg.items():
print(f'{k}: {v/c}')
if args.best_only:
print(abs_best)
if args.flat:
print("\t".join(m for _, m in entries))
if args.group_on is not None:
by_val = OrderedDict()
for e, m in entries:
k = args.group_on
if k not in e:
m_keys = [x for x in e.keys() if x.startswith(k)]
if len(m_keys) == 0:
val = "False"
else:
assert len(m_keys) == 1
k = m_keys[0]
val = m_keys[0]
else:
val = e[args.group_on]
if val == "":
val = "True"
scrubbed_entry = copy.deepcopy(e)
if k in scrubbed_entry:
del scrubbed_entry[k]
if args.remove_metric and args.remove_metric in scrubbed_entry:
val += '_' + scrubbed_entry[args.remove_metric]
del scrubbed_entry[args.remove_metric]
by_val.setdefault(tuple(scrubbed_entry.items()), dict())[val] = m
distinct_vals = set()
for v in by_val.values():
distinct_vals.update(v.keys())
try:
distinct_vals = {int(d) for d in distinct_vals}
except:
print(distinct_vals)
print()
print("by_val", len(by_val))
for k,v in by_val.items():
print(k, '=>', v)
print()
# , by_val, entries)
raise
from natsort import natsorted
svals = list(map(str, natsorted(distinct_vals)))
print('{}\t{}'.format(args.group_on, '\t'.join(svals)))
sums = OrderedDict({n:[] for n in svals})
for k, v in by_val.items():
kstr = '.'.join(':'.join(x) for x in k)
vstr = ''
for mv in svals:
x = v[mv] if mv in v else ''
vstr += '\t{}'.format(round(x, 5) if isinstance(x, float) else x)
try:
sums[mv].append(float(x))
except:
pass
print('{}{}'.format(kstr[:args.key_len], vstr))
if any(len(x) > 0 for x in sums.values()):
print('min:', end='')
for v in sums.values():
min = np.min(v)
print(f'\t{round(min, 5)}', end='')
print()
print('max:', end='')
for v in sums.values():
max = np.max(v)
print(f'\t{round(max, 5)}', end='')
print()
print('avg:', end='')
for v in sums.values():
mean = np.mean(v)
print(f'\t{round(mean, 5)}', end='')
print()
print('median:', end='')
for v in sums.values():
median = np.median(v)
print(f'\t{round(median, 5)}', end='')
print()
return ret
if __name__ == "__main__":
args = parser.parse_args()
main(args, print_output=True) | EXA-1-master | exa/libraries/fairseq/examples/data2vec/scripts/text/valids.py |
import json
import os
import tqdm
from fairseq.data import Dictionary, data_utils
def load_dictionary(dict_path):
return Dictionary.load(dict_path)
def load_dataset(split_path, src_dict):
dataset = data_utils.load_indexed_dataset(
split_path,
src_dict,
combine=False, # set to true for loading `train*`
)
if dataset is None:
raise FileNotFoundError(f"Dataset not found: {split_path}")
return dataset
def load_bpe(enc_path):
with open(enc_path) as f:
bpe2idx = json.load(f)
idx2bpe = {v: k for k, v in bpe2idx.items()}
return bpe2idx, idx2bpe
def detokenize(tokens, src_dict, idx2bpe):
raw_inds = map(int, src_dict.string(tokens).split())
raw_chrs = "".join([idx2bpe[raw_ind] for raw_ind in raw_inds])
raw_chrs = raw_chrs.replace("\u0120", " ")
return raw_chrs
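# Editor's note (not in the original script): "\u0120" is the GPT-2 byte-level BPE
# marker for a token that begins with a space, so replacing it with " " recovers the
# original whitespace. A hypothetical round trip, with illustrative paths:
#   src_dict = load_dictionary("dict.txt")
#   _, idx2bpe = load_bpe("encoder.json")
#   text = detokenize(dataset[0], src_dict, idx2bpe)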
def _main(src_root, src_dict_path, src_bpe_path, src_splits, tgt_root, tgt_splits):
src_dict = load_dictionary(src_dict_path)
bpe2idx, idx2bpe = load_bpe(src_bpe_path)
assert len(src_splits) == len(tgt_splits)
for src_split, tgt_split in zip(src_splits, tgt_splits):
src_dataset = load_dataset(f"{src_root}/{src_split}", src_dict)
tgt_path = f"{tgt_root}/{tgt_split}.txt"
print(f"processing {src_split} (dump to {tgt_path})...")
os.makedirs(os.path.dirname(tgt_path), exist_ok=True)
with open(tgt_path, "w") as f:
for tokens in tqdm.tqdm(src_dataset):
raw_str = detokenize(tokens, src_dict, idx2bpe)
f.write(raw_str + "\n")
def main_pt():
src_root = "/datasets01/bookwiki_CC-NEWS_openwebtext_stories-mmap2-bin/121219/bookwiki_CC-NEWS_openwebtext_stories-mmap2-bin"
src_dict_path = f"{src_root}/dict.txt"
src_bpe_path = f"{src_root}/encoder.json"
src_splits = [
"bookwiki_aml-mmap2-bin/shard0/train",
"bookwiki_aml-mmap2-bin/shard1/train",
"bookwiki_aml-mmap2-bin/shard2/train",
"bookwiki_aml-mmap2-bin/shard3/train",
"bookwiki_aml-mmap2-bin/shard4/train",
"bookwiki_aml-mmap2-bin/valid/valid",
]
tgt_root = "/checkpoint/wnhsu/data/data2vec2/data/text/bookwiki_aml-full-mmap2-txt"
tgt_splits = [
"train0",
"train1",
"train2",
"train3",
"train4",
"valid",
]
_main(src_root, src_dict_path, src_bpe_path, src_splits, tgt_root, tgt_splits)
def main_ft():
src_root = "/fsx-wav2vec/wnhsu/data/data2vec2/data/text/GLUE"
src_dict_path = f"{src_root}/dict.txt"
src_bpe_path = f"{src_root}/encoder.json"
src_splits = [
"CoLA-bin/input0/train",
"CoLA-bin/input0/valid",
"CoLA-bin/input0/test",
"MNLI-bin/input0/train",
"MNLI-bin/input0/valid",
"MNLI-bin/input0/test",
"MNLI-bin/input0/test1",
"MNLI-bin/input1/train",
"MNLI-bin/input1/valid",
"MNLI-bin/input1/test",
"MNLI-bin/input1/test1",
"MRPC-bin/input0/train",
"MRPC-bin/input0/valid",
"MRPC-bin/input0/test",
"MRPC-bin/input1/train",
"MRPC-bin/input1/valid",
"MRPC-bin/input1/test",
"QNLI-bin/input0/train",
"QNLI-bin/input0/valid",
"QNLI-bin/input0/test",
"QNLI-bin/input1/train",
"QNLI-bin/input1/valid",
"QNLI-bin/input1/test",
"QQP-bin/input0/train",
"QQP-bin/input0/valid",
"QQP-bin/input0/test",
"QQP-bin/input1/train",
"QQP-bin/input1/valid",
"QQP-bin/input1/test",
"RTE-bin/input0/train",
"RTE-bin/input0/valid",
"RTE-bin/input0/test",
"RTE-bin/input1/train",
"RTE-bin/input1/valid",
"RTE-bin/input1/test",
"SST-2-bin/input0/train",
"SST-2-bin/input0/valid",
"SST-2-bin/input0/test",
"STS-B-bin/input0/train",
"STS-B-bin/input0/valid",
"STS-B-bin/input0/test",
"STS-B-bin/input1/train",
"STS-B-bin/input1/valid",
"STS-B-bin/input1/test",
]
tgt_root = "/fsx-wav2vec/wnhsu/data/data2vec2/data/text/GLUE_chr"
tgt_splits = [
"CoLA-bin/input0/train",
"CoLA-bin/input0/valid",
"CoLA-bin/input0/test",
"MNLI-bin/input0/train",
"MNLI-bin/input0/valid",
"MNLI-bin/input0/test",
"MNLI-bin/input0/test1",
"MNLI-bin/input1/train",
"MNLI-bin/input1/valid",
"MNLI-bin/input1/test",
"MNLI-bin/input1/test1",
"MRPC-bin/input0/train",
"MRPC-bin/input0/valid",
"MRPC-bin/input0/test",
"MRPC-bin/input1/train",
"MRPC-bin/input1/valid",
"MRPC-bin/input1/test",
"QNLI-bin/input0/train",
"QNLI-bin/input0/valid",
"QNLI-bin/input0/test",
"QNLI-bin/input1/train",
"QNLI-bin/input1/valid",
"QNLI-bin/input1/test",
"QQP-bin/input0/train",
"QQP-bin/input0/valid",
"QQP-bin/input0/test",
"QQP-bin/input1/train",
"QQP-bin/input1/valid",
"QQP-bin/input1/test",
"RTE-bin/input0/train",
"RTE-bin/input0/valid",
"RTE-bin/input0/test",
"RTE-bin/input1/train",
"RTE-bin/input1/valid",
"RTE-bin/input1/test",
"SST-2-bin/input0/train",
"SST-2-bin/input0/valid",
"SST-2-bin/input0/test",
"STS-B-bin/input0/train",
"STS-B-bin/input0/valid",
"STS-B-bin/input0/test",
"STS-B-bin/input1/train",
"STS-B-bin/input1/valid",
"STS-B-bin/input1/test",
]
_main(src_root, src_dict_path, src_bpe_path, src_splits, tgt_root, tgt_splits)
if __name__ == "__main__":
main_pt()
main_ft()
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/scripts/text/unprocess_data.py |
import os.path as osp
import re
from collections import defaultdict
from valids import parser, main as valids_main
TASK_TO_METRIC = {
"cola": "mcc",
"qnli": "accuracy",
"mrpc": "acc_and_f1",
"rte": "accuracy",
"sst_2": "accuracy",
"mnli": "accuracy",
"qqp": "acc_and_f1",
"sts_b": "pearson_and_spearman",
}
TASKS = ["cola", "qnli", "mrpc", "rte", "sst_2", "mnli", "qqp", "sts_b"]
def get_best_stat_str(task_vals, show_subdir):
task_to_best_val = {}
task_to_best_dir = {}
for task, subdir_to_val in task_vals.items():
task_to_best_val[task] = max(subdir_to_val.values())
task_to_best_dir[task] = max(subdir_to_val.keys(), key=lambda x: subdir_to_val[x])
# import pdb; pdb.set_trace()
N1 = len(task_to_best_val)
N2 = len([k for k in task_to_best_val if k != "rte"])
avg1 = sum(task_to_best_val.values()) / N1
avg2 = sum(v for task, v in task_to_best_val.items() if task != "rte") / N2
try:
msg = ""
for task in TASKS:
dir = task_to_best_dir.get(task, 'null')
val = task_to_best_val.get(task, -100)
msg += f"({dir}, {val})\t" if show_subdir else f"{val}\t"
msg += f"{avg1:.2f}\t{avg2:.2f}"
except Exception as e:
msg = str(e)
msg += str(sorted(task_vals.items()))
return msg
def get_all_stat_str(task_vals):
msg = ""
for task in [task for task in TASKS if task in task_vals]:
msg += f"=== {task}\n"
for subdir in sorted(task_vals[task].keys()):
msg += f"\t{subdir}\t{task_vals[task][subdir]}\n"
return msg
def get_tabular_stat_str(task_vals):
"""assume subdir is <param>/run_*/0"""
msg = ""
for task in [task for task in TASKS if task in task_vals]:
msg += f"=== {task}\n"
param_to_runs = defaultdict(dict)
for subdir in task_vals[task]:
match = re.match("(.*)/(run_.*)/0", subdir)
assert match, "subdir"
param, run = match.groups()
param_to_runs[param][run] = task_vals[task][subdir]
params = sorted(param_to_runs, key=lambda x: float(x))
runs = sorted(set(run for runs in param_to_runs.values() for run in runs))
msg += ("runs:" + "\t".join(runs) + "\n")
msg += ("params:" + "\t".join(params) + "\n")
for param in params:
msg += "\t".join([str(param_to_runs[param].get(run, None)) for run in runs])
msg += "\n"
# for subdir in sorted(task_vals[task].keys()):
# msg += f"\t{subdir}\t{task_vals[task][subdir]}\n"
return msg
def main():
parser.add_argument("--show_glue", action="store_true", help="show glue metric for each task instead of accuracy")
parser.add_argument("--print_mode", default="best", help="best|all|tabular")
parser.add_argument("--show_subdir", action="store_true", help="print the subdir that has the best results for each run")
parser.add_argument("--override_target", default="valid_accuracy", help="override target")
args = parser.parse_args()
args.target = args.override_target
args.best_biggest = True
args.best = True
args.last = 0
args.path_contains = None
res = valids_main(args, print_output=False)
grouped_acc = {}
grouped_met = {} # use official metric for each task
for path, v in res.items():
path = "/".join([args.base, path])
path = re.sub("//*", "/", path)
match = re.match("(.*)finetune[^/]*/([^/]*)/(.*)", path)
if not match:
continue
run, task, subdir = match.groups()
if run not in grouped_acc:
grouped_acc[run] = {}
grouped_met[run] = {}
if task not in grouped_acc[run]:
grouped_acc[run][task] = {}
grouped_met[run][task] = {}
if v is not None:
grouped_acc[run][task][subdir] = float(v.get("valid_accuracy", -100))
grouped_met[run][task][subdir] = float(v.get(f"valid_{TASK_TO_METRIC[task]}", -100))
else:
print(f"{path} has None return")
header = "\t".join(TASKS)
for run in sorted(grouped_acc):
print(run)
if args.print_mode == "all":
if args.show_glue:
print("===== GLUE =====")
print(get_all_stat_str(grouped_met[run]))
else:
print("===== ACC =====")
print(get_all_stat_str(grouped_acc[run]))
elif args.print_mode == "best":
print(f" {header}")
if args.show_glue:
print(f"GLEU: {get_best_stat_str(grouped_met[run], args.show_subdir)}")
else:
print(f"ACC: {get_best_stat_str(grouped_acc[run], args.show_subdir)}")
elif args.print_mode == "tabular":
if args.show_glue:
print("===== GLUE =====")
print(get_tabular_stat_str(grouped_met[run]))
else:
print("===== ACC =====")
print(get_tabular_stat_str(grouped_acc[run]))
else:
raise ValueError(args.print_mode)
print()
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/scripts/text/glue_lr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import logging
import math
import random
import time
import numpy as np
import os
import torch
from torchvision import datasets, transforms
from .path_dataset import PathDataset
from fairseq.data import FairseqDataset
from fairseq.data.data_utils import compute_block_mask_1d, compute_block_mask_2d
from shutil import copyfile
logger = logging.getLogger(__name__)
def load(path, loader, cache):
if hasattr(caching_loader, "cache_root"):
cache = caching_loader.cache_root
cached_path = cache + path
num_tries = 3
for curr_try in range(num_tries):
try:
if curr_try == 2:
return loader(path)
if not os.path.exists(cached_path) or curr_try > 0:
os.makedirs(os.path.dirname(cached_path), exist_ok=True)
copyfile(path, cached_path)
os.chmod(cached_path, 0o777)
return loader(cached_path)
except Exception as e:
logger.warning(str(e))
if "Errno 13" in str(e):
caching_loader.cache_root = f"/scratch/{random.randint(0, 69420)}"
logger.warning(f"setting cache root to {caching_loader.cache_root}")
cached_path = caching_loader.cache_root + path
if curr_try == (num_tries - 1):
raise
time.sleep(2)
def caching_loader(cache_root: str, loader):
if cache_root is None:
return loader
if cache_root == "slurm_tmpdir":
cache_root = os.environ["SLURM_TMPDIR"]
assert len(cache_root) > 0
if not cache_root.endswith("/"):
cache_root += "/"
return partial(load, loader=loader, cache=cache_root)
class RandomResizedCropAndInterpolationWithTwoPic:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(
self,
size,
second_size=None,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation="bilinear",
second_interpolation="lanczos",
):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if second_size is not None:
if isinstance(second_size, tuple):
self.second_size = second_size
else:
self.second_size = (second_size, second_size)
else:
self.second_size = None
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
logger.warning("range should be of kind (min, max)")
if interpolation == "random":
from PIL import Image
self.interpolation = (Image.BILINEAR, Image.BICUBIC)
else:
self.interpolation = self._pil_interp(interpolation)
self.second_interpolation = (
self._pil_interp(second_interpolation)
if second_interpolation is not None
else None
)
self.scale = scale
self.ratio = ratio
def _pil_interp(self, method):
from PIL import Image
if method == "bicubic":
return Image.BICUBIC
elif method == "lanczos":
return Image.LANCZOS
elif method == "hamming":
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
import torchvision.transforms.functional as F
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
if self.second_size is None:
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
else:
return F.resized_crop(
img, i, j, h, w, self.size, interpolation
), F.resized_crop(
img, i, j, h, w, self.second_size, self.second_interpolation
)
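# Editor's sketch (not in the original file): when `second_size` is set, the transform
# crops one region and returns it at two resolutions (e.g. a model input plus a
# lower-resolution target view). The _demo_* helper and its sizes are illustrative
# and the function is never called by the module.
def _demo_two_pic_crop():
    from PIL import Image
    img = Image.new("RGB", (256, 256))
    crop = RandomResizedCropAndInterpolationWithTwoPic(size=224, second_size=112)
    out, second = crop(img)
    assert out.size == (224, 224) and second.size == (112, 112)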
class MaeImageDataset(FairseqDataset):
def __init__(
self,
root: str,
split: str,
input_size,
local_cache_path=None,
shuffle=True,
key="imgs",
beit_transforms=False,
target_transform=False,
no_transform=False,
compute_mask=False,
patch_size: int = 16,
mask_prob: float = 0.75,
mask_prob_adjust: float = 0,
mask_length: int = 1,
inverse_mask: bool = False,
expand_adjacent: bool = False,
mask_dropout: float = 0,
non_overlapping: bool = False,
require_same_masks: bool = True,
clone_batch: int = 1,
dataset_type: str = "imagefolder",
):
FairseqDataset.__init__(self)
self.shuffle = shuffle
self.key = key
loader = caching_loader(local_cache_path, datasets.folder.default_loader)
self.transform_source = None
self.transform_target = None
if target_transform:
self.transform_source = transforms.ColorJitter(0.4, 0.4, 0.4)
self.transform_target = transforms.ColorJitter(0.4, 0.4, 0.4)
if no_transform:
if input_size <= 224:
crop_pct = 224 / 256
else:
crop_pct = 1.0
size = int(input_size / crop_pct)
self.transform_train = transforms.Compose(
[
transforms.Resize(size, interpolation=3),
transforms.CenterCrop(input_size),
]
)
            # note: this plain square resize overrides the resize + center-crop
            # pipeline built just above
            self.transform_train = transforms.Resize((input_size, input_size))
elif beit_transforms:
beit_transform_list = []
if not target_transform:
beit_transform_list.append(transforms.ColorJitter(0.4, 0.4, 0.4))
beit_transform_list.extend(
[
transforms.RandomHorizontalFlip(p=0.5),
RandomResizedCropAndInterpolationWithTwoPic(
size=input_size,
second_size=None,
interpolation="bicubic",
second_interpolation=None,
),
]
)
self.transform_train = transforms.Compose(beit_transform_list)
else:
self.transform_train = transforms.Compose(
[
transforms.RandomResizedCrop(
input_size, scale=(0.2, 1.0), interpolation=3
), # 3 is bicubic
transforms.RandomHorizontalFlip(),
]
)
self.final_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
if dataset_type == "imagefolder":
self.dataset = datasets.ImageFolder(
os.path.join(root, split), loader=loader
)
elif dataset_type == "path":
self.dataset = PathDataset(
root,
loader,
None,
None,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
else:
raise Exception(f"invalid dataset type {dataset_type}")
logger.info(
f"initial transform: {self.transform_train}, "
f"source transform: {self.transform_source}, "
f"target transform: {self.transform_target}, "
f"final transform: {self.final_transform}"
)
logger.info(f"loaded {len(self.dataset)} examples")
self.is_compute_mask = compute_mask
self.patches = (input_size // patch_size) ** 2
self.mask_prob = mask_prob
self.mask_prob_adjust = mask_prob_adjust
self.mask_length = mask_length
self.inverse_mask = inverse_mask
self.expand_adjacent = expand_adjacent
self.mask_dropout = mask_dropout
self.non_overlapping = non_overlapping
self.require_same_masks = require_same_masks
self.clone_batch = clone_batch
def __getitem__(self, index):
img, _ = self.dataset[index]
img = self.transform_train(img)
source = None
target = None
if self.transform_source is not None:
source = self.final_transform(self.transform_source(img))
if self.transform_target is not None:
target = self.final_transform(self.transform_target(img))
if source is None:
img = self.final_transform(img)
v = {"id": index, self.key: source if source is not None else img}
if target is not None:
v["target"] = target
if self.is_compute_mask:
if self.mask_length == 1:
mask = compute_block_mask_1d(
shape=(self.clone_batch, self.patches),
mask_prob=self.mask_prob,
mask_length=self.mask_length,
mask_prob_adjust=self.mask_prob_adjust,
inverse_mask=self.inverse_mask,
require_same_masks=True,
)
else:
mask = compute_block_mask_2d(
shape=(self.clone_batch, self.patches),
mask_prob=self.mask_prob,
mask_length=self.mask_length,
mask_prob_adjust=self.mask_prob_adjust,
inverse_mask=self.inverse_mask,
require_same_masks=True,
expand_adjcent=self.expand_adjacent,
mask_dropout=self.mask_dropout,
non_overlapping=self.non_overlapping,
)
v["precomputed_mask"] = mask
return v
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if len(samples) == 0:
return {}
collated_img = torch.stack([s[self.key] for s in samples], dim=0)
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {
self.key: collated_img,
},
}
if "target" in samples[0]:
collated_target = torch.stack([s["target"] for s in samples], dim=0)
res["net_input"]["target"] = collated_target
if "precomputed_mask" in samples[0]:
collated_mask = torch.cat([s["precomputed_mask"] for s in samples], dim=0)
res["net_input"]["precomputed_mask"] = collated_mask
return res
def num_tokens(self, index):
return 1
def size(self, index):
return 1
@property
def sizes(self):
return np.full((len(self),), 1)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
return order[0]
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/data/mae_image_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import BaseWrapperDataset, data_utils
class AddClassTargetDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
labels,
multi_class,
num_classes=None,
label_indices=None,
add_to_input=True,
):
super().__init__(dataset)
self.label_indices = label_indices
self.labels = labels
self.multi_class = multi_class
self.add_to_input = add_to_input
if num_classes is None and multi_class:
assert self.label_indices is not None
num_classes = len(self.label_indices)
self.num_classes = num_classes
def __getitem__(self, index):
item = self.dataset[index]
item_labels = self.labels[index]
if self.multi_class:
item["label"] = torch.zeros(self.num_classes)
for il in item_labels:
if self.label_indices is not None:
il = self.label_indices[il]
item["label"][il] = 1.0
else:
item["label"] = torch.tensor(
self.labels[index]
if self.label_indices is None
else self.label_indices[self.labels[index]]
)
return item
def collater(self, samples):
collated = self.dataset.collater(samples)
if len(collated) == 0:
return collated
indices = set(collated["id"].tolist())
target = [s["label"] for s in samples if s["id"] in indices]
collated["label"] = torch.stack(target, dim=0)
if self.add_to_input:
collated["net_input"]["label"] = collated["label"]
return collated
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/data/add_class_target_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import os
from typing import Optional, Callable, Set
import torch
from torchvision.datasets.vision import VisionDataset
from torchvision.transforms import ToTensor
from fairseq.data import FairseqDataset
logger = logging.getLogger(__name__)
class ImageDataset(FairseqDataset, VisionDataset):
def __init__(
self,
root: str,
extensions: Set[str],
load_classes: bool,
transform: Optional[Callable] = None,
shuffle=True,
):
FairseqDataset.__init__(self)
VisionDataset.__init__(self, root=root, transform=transform)
self.shuffle = shuffle
self.tensor_transform = ToTensor()
self.classes = None
self.labels = None
if load_classes:
classes = [d.name for d in os.scandir(root) if d.is_dir()]
classes.sort()
self.classes = {cls_name: i for i, cls_name in enumerate(classes)}
logger.info(f"loaded {len(self.classes)} classes")
self.labels = []
def walk_path(root_path):
for root, _, fnames in sorted(os.walk(root_path, followlinks=True)):
for fname in sorted(fnames):
fname_ext = os.path.splitext(fname)
if fname_ext[-1].lower() not in extensions:
continue
path = os.path.join(root, fname)
yield path
logger.info(f"finding images in {root}")
if self.classes is not None:
self.files = []
self.labels = []
for c, i in self.classes.items():
for f in walk_path(os.path.join(root, c)):
self.files.append(f)
self.labels.append(i)
else:
self.files = [f for f in walk_path(root)]
logger.info(f"loaded {len(self.files)} examples")
def __getitem__(self, index):
from PIL import Image
fpath = self.files[index]
with open(fpath, "rb") as f:
img = Image.open(f).convert("RGB")
if self.transform is None:
img = self.tensor_transform(img)
else:
img = self.transform(img)
assert torch.is_tensor(img)
res = {"id": index, "img": img}
if self.labels is not None:
res["label"] = self.labels[index]
return res
def __len__(self):
return len(self.files)
def collater(self, samples):
if len(samples) == 0:
return {}
collated_img = torch.stack([s["img"] for s in samples], dim=0)
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {
"img": collated_img,
},
}
if "label" in samples[0]:
res["net_input"]["label"] = torch.LongTensor([s["label"] for s in samples])
return res
def num_tokens(self, index):
return 1
def size(self, index):
return 1
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
return order[0]
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/data/image_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .image_dataset import ImageDataset
from .path_dataset import PathDataset
from .mae_image_dataset import MaeImageDataset
from .mae_finetuning_image_dataset import MaeFinetuningImageDataset
__all__ = [
"ImageDataset",
"MaeImageDataset",
"MaeFinetuningImageDataset",
"PathDataset",
] | EXA-1-master | exa/libraries/fairseq/examples/data2vec/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import os
import torch
from torchvision import datasets, transforms
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import PIL
from fairseq.data import FairseqDataset
from .mae_image_dataset import caching_loader
logger = logging.getLogger(__name__)
def build_transform(is_train, input_size, color_jitter, aa, reprob, remode, recount):
mean = IMAGENET_DEFAULT_MEAN
std = IMAGENET_DEFAULT_STD
# train transform
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=input_size,
is_training=True,
color_jitter=color_jitter,
auto_augment=aa,
interpolation="bicubic",
re_prob=reprob,
re_mode=remode,
re_count=recount,
mean=mean,
std=std,
)
return transform
# eval transform
t = []
if input_size <= 224:
crop_pct = 224 / 256
else:
crop_pct = 1.0
size = int(input_size / crop_pct)
t.append(
transforms.Resize(
size, interpolation=PIL.Image.BICUBIC
), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
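# Editor's sketch (not in the original file): the eval branch resizes the short side
# to input_size / crop_pct and then center-crops, following the usual ImageNet
# evaluation protocol. The _demo_* helper and the image size are illustrative and
# the function is never called by the module.
def _demo_build_transform():
    from PIL import Image
    eval_tf = build_transform(
        is_train=False, input_size=224, color_jitter=None,
        aa=None, reprob=0.0, remode="pixel", recount=1,
    )
    x = eval_tf(Image.new("RGB", (500, 375)))
    assert x.shape == (3, 224, 224)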
class MaeFinetuningImageDataset(FairseqDataset):
def __init__(
self,
root: str,
split: str,
is_train: bool,
input_size,
color_jitter=None,
aa="rand-m9-mstd0.5-inc1",
reprob=0.25,
remode="pixel",
recount=1,
local_cache_path=None,
shuffle=True,
):
FairseqDataset.__init__(self)
self.shuffle = shuffle
transform = build_transform(
is_train, input_size, color_jitter, aa, reprob, remode, recount
)
path = os.path.join(root, split)
loader = caching_loader(local_cache_path, datasets.folder.default_loader)
self.dataset = datasets.ImageFolder(path, loader=loader, transform=transform)
logger.info(f"loaded {len(self.dataset)} examples")
def __getitem__(self, index):
img, label = self.dataset[index]
return {"id": index, "img": img, "label": label}
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if len(samples) == 0:
return {}
collated_img = torch.stack([s["img"] for s in samples], dim=0)
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {
"imgs": collated_img,
},
}
if "label" in samples[0]:
res["net_input"]["labels"] = torch.LongTensor([s["label"] for s in samples])
return res
def num_tokens(self, index):
return 1
def size(self, index):
return 1
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
return order[0]
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/data/mae_finetuning_image_dataset.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from enum import Enum, auto
class Modality(Enum):
AUDIO = auto()
IMAGE = auto()
TEXT = auto()
| EXA-1-master | exa/libraries/fairseq/examples/data2vec/data/modality.py |