python_code (stringlengths 0–992k) | repo_name (stringlengths 8–46) | file_path (stringlengths 5–162)
---|---|---|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from fairseq.data import Dictionary
def get_parser():
parser = argparse.ArgumentParser(
description="filters a lexicon given a unit dictionary"
)
parser.add_argument("-d", "--unit-dict", help="unit dictionary", required=True)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
d = Dictionary.load(args.unit_dict)
symbols = set(d.symbols)
for line in sys.stdin:
items = line.rstrip().split()
skip = len(items) < 2
for x in items[1:]:
if x not in symbols:
skip = True
break
if not skip:
print(line, end="")
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/filter_lexicon.py |
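Below is a minimal sketch of the same filtering rule on an in-memory toy lexicon, without fairseq's Dictionary; the unit symbols and lexicon entries are illustrative only.
# Sketch of filter_lexicon.py's rule on toy data (no fairseq required).
# The unit symbols and lexicon entries below are illustrative, not from the repo.
symbols = {"K", "AH", "T", "S"}

lexicon = [
    "cat K AE T",   # dropped: "AE" is not in the unit dictionary
    "cut K AH T",   # kept: every unit after the word is known
    "a",            # dropped: fewer than 2 fields
]

for line in lexicon:
    items = line.rstrip().split()
    keep = len(items) >= 2 and all(x in symbols for x in items[1:])
    if keep:
        print(line)  # prints only "cut K AH T"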
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="transforms features via a given pca and stored them in target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--pca-path', type=str, help='pca location. will append _A.npy and _b.npy', required=True)
parser.add_argument('--batch-size', type=int, default=2048000, help='batch size')
parser.add_argument('--unfiltered', action='store_true', help='process the unfiltered version')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
data_path = source_path + "_unfiltered" if args.unfiltered else source_path
print(f"data path: {data_path}")
features = np.load(data_path + ".npy", mmap_mode="r")
pca_A = torch.from_numpy(np.load(args.pca_path + "_A.npy")).cuda()
pca_b = torch.from_numpy(np.load(args.pca_path + "_b.npy")).cuda()
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
copyfile(data_path + ".lengths", save_path + ".lengths")
if osp.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if osp.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
batches = math.ceil(features.shape[0] / args.batch_size)
with torch.no_grad():
for b in tqdm.trange(batches):
start = b * args.batch_size
end = start + args.batch_size
x = torch.from_numpy(features[start:end]).cuda()
x = torch.matmul(x, pca_A) + pca_b
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/apply_pca.py |
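apply_pca.py expects two arrays next to --pca-path, `<pca-path>_A.npy` and `<pca-path>_b.npy`, applied as `x @ A + b`. A hedged sketch of producing arrays with those semantics from a fitted scikit-learn PCA (the fairseq pipeline ships its own pca script; this only shows the expected shapes and relation):
# Sketch: build _A.npy / _b.npy such that feats @ A + b equals the PCA projection.
# Assumes scikit-learn; shapes and file names are illustrative.
import numpy as np
from sklearn.decomposition import PCA

feats = np.random.randn(1000, 512).astype(np.float32)   # stand-in for <split>.npy
pca = PCA(n_components=64).fit(feats)

A = pca.components_.T                   # (512, 64)
b = -pca.mean_ @ pca.components_.T      # (64,)

np.save("pca_A.npy", A)
np.save("pca_b.npy", b)

# sanity check: matches scikit-learn's own transform
assert np.allclose(feats @ A + b, pca.transform(feats), atol=1e-3)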
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from g2p_en import G2p
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--compact",
action="store_true",
help="if set, compacts phones",
)
args = parser.parse_args()
compact = args.compact
wrd_to_phn = {}
g2p = G2p()
for line in sys.stdin:
words = line.strip().split()
phones = []
for w in words:
if w not in wrd_to_phn:
wrd_to_phn[w] = g2p(w)
if compact:
wrd_to_phn[w] = [
p[:-1] if p[-1].isnumeric() else p for p in wrd_to_phn[w]
]
phones.extend(wrd_to_phn[w])
try:
print(" ".join(phones))
except:
print(wrd_to_phn, words, phones, file=sys.stderr)
raise
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py |
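With --compact, trailing stress digits are stripped from the ARPAbet phones returned by g2p_en; a tiny illustration of that step (the example word and phones are illustrative):
# Sketch of the --compact step: drop trailing stress digits from ARPAbet phones.
phones = ["HH", "AH0", "L", "OW1"]   # e.g. what g2p("hello") returns
compact = [p[:-1] if p[-1].isnumeric() else p for p in phones]
print(" ".join(compact))             # HH AH L OW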
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Reads voiced-interval annotations from a .vads file and the audio manifest from a .tsv file,
removes the silences, and saves the trimmed audio under the --out folder.
paths=shards/train.tsv
vads=shards/train.vads
python remove_silence.py --tsv $paths --vads $vads --out $out
"""
import os
import argparse
import torch
import torchaudio
import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", default="", type=str)
parser.add_argument("--vads", default="", type=str)
parser.add_argument("--out", type=str)
params = parser.parse_args()
# load paths
paths = []
with open(params.tsv) as f:
root = next(f).rstrip()
for line in f:
paths.append(os.path.join(root, line.rstrip().split("\t")[0]))
# load vads
list_intervals = []
with open(params.vads) as f:
for line in f:
interval = [
[int(w.split(":")[0]), int(w.split(":")[1])] for w in line.rstrip().split()
]
list_intervals.append(interval)
# load audio and keep only intervals (i.e. remove silences)
for i in tqdm.trange(len(paths)):
data, _ = torchaudio.load(paths[i])
if len(list_intervals[i]) > 0:
data_filtered = torch.cat(
[data[0][int(it[0]) : int(it[1])] for it in list_intervals[i]]
).unsqueeze(0)
else:
data_filtered = data
# YOU MAY NEED TO MODIFY THIS TO GET THE RIGHT SUBPATH
# outpath = params.out + '/'.join(paths[i].split('/')[-1])
outpath = params.out + "/" + "/".join(paths[i].split("/")[-2:])
if not os.path.isdir("/".join(outpath.split("/")[:-1])):
os.makedirs("/".join(outpath.split("/")[:-1]))
if not os.path.exists(outpath):
torchaudio.save(outpath, data_filtered, sample_rate=16000)
else:
print(outpath, "exists!")
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/remove_silence.py |
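Each line of the .vads file holds space-separated `start:end` sample intervals matching the corresponding line of the .tsv manifest. A small sketch of parsing one line and concatenating the voiced regions (interval values are made up):
# Sketch: parse one .vads line and keep only the voiced intervals of a waveform.
import torch

vads_line = "0:16000 32000:48000"    # illustrative start:end sample indices
intervals = [[int(w.split(":")[0]), int(w.split(":")[1])]
             for w in vads_line.split()]

data = torch.randn(1, 64000)         # stand-in for a torchaudio.load(...) result
kept = torch.cat([data[0][s:e] for s, e in intervals]).unsqueeze(0)
print(kept.shape)                    # torch.Size([1, 32000])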
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from kaldi-computed feats"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True)
parser.add_argument('--layer', type=int, default=14, help='which layer to use')
# fmt: on
return parser
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[cp_file]
)
model = model[0]
model.eval()
model.cuda()
self.model = model
self.task = task
self.layer = layer
def read_audio(self, fname):
"""Load an audio file and return PCM along with the sample rate"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
assert source.dim() == 1, source.dim()
with torch.no_grad():
source = F.layer_norm(source, source.shape)
source = source.view(1, -1)
m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer)
return m_res["x"].squeeze(0).cpu()
def get_iterator(args):
with open(osp.join(args.data, args.split) + ".tsv", "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
w2v_feats = reader.get_feats(fname)
yield w2v_feats
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
os.makedirs(args.save_dir, exist_ok=True)
def create_files(dest):
copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv")
if osp.exists(osp.join(args.data, args.split) + ".wrd"):
copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd")
if osp.exists(osp.join(args.data, args.split) + ".phn"):
copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn")
if osp.exists(dest + ".npy"):
os.remove(dest + ".npy")
npaa = NpyAppendArray(dest + ".npy")
return npaa
save_path = osp.join(args.save_dir, args.split)
npaa = create_files(save_path)
generator, num = get_iterator(args)
iterator = generator()
with open(save_path + ".lengths", "w") as l_f:
for w2v_feats in tqdm.tqdm(iterator, total=num):
print(len(w2v_feats), file=l_f)
if len(w2v_feats) > 0:
npaa.append(w2v_feats.numpy())
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py |
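The script writes a single concatenated `<split>.npy` plus a `<split>.lengths` file with one frame count per utterance; a sketch of slicing per-utterance features back out of those two files (file names follow the script's convention, the contents are whatever you extracted):
# Sketch: recover per-utterance features from the concatenated .npy and .lengths files.
import numpy as np

feats = np.load("train.npy", mmap_mode="r")          # (total_frames, dim)
lengths = [int(l) for l in open("train.lengths")]    # frames per utterance

offsets = np.concatenate([[0], np.cumsum(lengths)[:-1]])
utt = feats[offsets[3] : offsets[3] + lengths[3]]    # e.g. features of the 4th utterance
print(utt.shape)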
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .extracted_features_dataset import ExtractedFeaturesDataset
from .random_input_dataset import RandomInputDataset
__all__ = [
"ExtractedFeaturesDataset",
"RandomInputDataset",
]
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import contextlib
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
class ExtractedFeaturesDataset(FairseqDataset):
def __init__(
self,
path,
split,
min_length=3,
max_length=None,
labels=None,
label_dict=None,
shuffle=True,
sort_by_length=True,
aux_target_postfix=None,
):
super().__init__()
self.min_length = min_length
self.max_length = max_length
self.shuffle = shuffle
self.sort_by_length = sort_by_length
self.label_dict = label_dict
if labels is not None:
assert label_dict is not None
self.sizes = []
self.offsets = []
self.labels = []
self.aux_tgt = None
path = os.path.join(path, split)
data_path = path
self.data = np.load(data_path + ".npy", mmap_mode="r")
offset = 0
skipped = 0
if not os.path.exists(path + f".{labels}"):
labels = None
with open(data_path + ".lengths", "r") as len_f, open(
path + f".{labels}", "r"
) if labels is not None else contextlib.ExitStack() as lbl_f:
for line in len_f:
length = int(line.rstrip())
lbl = None if labels is None else next(lbl_f).rstrip().split()
if length >= min_length and (
max_length is None or length <= max_length
):
self.sizes.append(length)
self.offsets.append(offset)
if lbl is not None:
self.labels.append(lbl)
offset += length
self.sizes = np.asarray(self.sizes)
self.offsets = np.asarray(self.offsets)
if aux_target_postfix is not None:
if not os.path.exists(path+f".{aux_target_postfix}"):
logger.info(f"auxaliry target for {split} missing")
else:
with open(path+f".{aux_target_postfix}", "r") as t_f:
self.aux_tgt = [
torch.LongTensor(list(map(int,seg.strip().split())))\
for seg in t_f]
logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples")
def __getitem__(self, index):
offset = self.offsets[index]
end = self.sizes[index] + offset
feats = torch.from_numpy(self.data[offset:end].copy()).float()
res = {"id": index, "features": feats}
if len(self.labels) > 0:
res["target"] = self.label_dict.encode_line(
self.labels[index],
line_tokenizer=lambda x: x,
append_eos=False,
)
if self.aux_tgt:
res["aux_target"] = self.aux_tgt[index]
return res
def __len__(self):
return len(self.sizes)
def collater(self, samples):
if len(samples) == 0:
return {}
features = [s["features"] for s in samples]
sizes = [len(s) for s in features]
target_size = max(sizes)
collated_features = features[0].new_zeros(
len(features), target_size, features[0].size(-1)
)
padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False)
for i, (f, size) in enumerate(zip(features, sizes)):
collated_features[i, :size] = f
padding_mask[i, size:] = True
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {"features": collated_features, "padding_mask": padding_mask},
}
if len(self.labels) > 0:
target = data_utils.collate_tokens(
[s["target"] for s in samples],
pad_idx=self.label_dict.pad(),
left_pad=False,
)
res["target"] = target
if self.aux_tgt:
idxs = torch.nn.utils.rnn.pad_sequence(
[s["aux_target"] for s in samples],
batch_first=True,
padding_value=-1,
)
res["net_input"]["aux_target"] = idxs
return res
def num_tokens(self, index):
return self.size(index)
def size(self, index):
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
if self.sort_by_length:
order.append(self.sizes)
return np.lexsort(order)[::-1]
else:
return order[0]
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/data/extracted_features_dataset.py |
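A sketch of the collater's padding scheme on toy tensors: variable-length feature sequences are right-padded with zeros and a boolean padding_mask marks the padded frames (no fairseq needed for this illustration):
# Sketch of ExtractedFeaturesDataset.collater's padding behaviour on toy data.
import torch

features = [torch.randn(5, 4), torch.randn(3, 4)]    # two utterances, feature dim 4
sizes = [f.size(0) for f in features]
target_size = max(sizes)

collated = features[0].new_zeros(len(features), target_size, 4)
padding_mask = torch.zeros(len(features), target_size, dtype=torch.bool)
for i, (f, size) in enumerate(zip(features, sizes)):
    collated[i, :size] = f
    padding_mask[i, size:] = True

print(collated.shape, padding_mask.sum(1))   # torch.Size([2, 5, 4]) tensor([0, 2])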
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import List
from fairseq.data import BaseWrapperDataset, data_utils
class RandomInputDataset(BaseWrapperDataset):
def __init__(
self,
dataset,
random_input_dataset,
input_key_path: List[str],
add_to_input,
pad_idx,
):
super().__init__(dataset)
self.random_input_dataset = random_input_dataset
if isinstance(input_key_path, str):
input_key_path = [input_key_path]
assert len(input_key_path) > 0
self.input_key_path = input_key_path
self.add_to_input = add_to_input
self.pad_idx = pad_idx
def get_target(self, item):
target_loc = item
for p in self.input_key_path[:-1]:
target_loc = target_loc[p]
return self.input_key_path[-1], target_loc
def get_target_value(self, item):
k, target_loc = self.get_target(item)
return target_loc[k]
def __getitem__(self, index):
item = self.dataset[index]
k, target_loc = self.get_target(item)
target_loc[k] = random.choice(self.random_input_dataset)
return item
def collater(self, samples):
collated = self.dataset.collater(samples)
if len(collated) == 0:
return collated
indices = set(collated["id"].tolist())
random_inputs = data_utils.collate_tokens(
[self.get_target_value(s) for s in samples if s["id"] in indices],
pad_idx=self.pad_idx,
left_pad=False,
)
k, target_loc = self.get_target(
collated if not self.add_to_input else collated["net_input"]
)
target_loc[k] = random_inputs
return collated
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/unsupervised/data/random_input_dataset.py |
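get_target walks a nested item dict along input_key_path and returns the final key together with its parent, so the value can be overwritten in place; a tiny illustration on a toy item (the key names are made up):
# Sketch: how input_key_path addresses a nested field so it can be replaced in place.
item = {"net_input": {"random_label": [1, 2, 3]}}
input_key_path = ["net_input", "random_label"]

target_loc = item
for p in input_key_path[:-1]:
    target_loc = target_loc[p]
k = input_key_path[-1]

target_loc[k] = [7, 8, 9]   # same effect as RandomInputDataset.__getitem__
print(item)                 # {'net_input': {'random_label': [7, 8, 9]}}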
"""
Usage:
This script extracts the embedding / logit for the speech classification task.
1. Set fdir to your model checkpoint path
2. Run the following command (preferably on a GPU machine to speed up the inference process)
CUDA_VISIBLE_DEVICES=0 python3 examples/wav2vec/gen_audio_embedding.py /fsx/data/VoxLingua107/manifest --path ${fdir} \
--task audio_classification --batch-size 90 --gen-subset test \
--infer-manifest /fsx/data/VoxLingua107/manifest/test.tsv \
--infer-xtimes 10 --infer-max-sample-size 160000 --output-path $odir
Example:
Case: LID logit extraction
fdir='/fsx/androstj/exps/voxlingua_lid_train_all/ckpt_100pct_300m_voxling-act_linear-pool_mean_fast-lr_1e-4-phase_0.1_0.4_0.5-maxupd_100000-ufreq_1-mprob_0.5-fz_0-cr_softmax/0/checkpoints/checkpoint_best.pt'
python3 examples/wav2vec/gen_audio_embedding.py /fsx/data/VoxLingua107/manifest --path ${fdir} \
--task audio_classification --batch-size 90 --gen-subset test \
--infer-manifest /fsx/data/VoxLingua107/manifest/test.tsv \
--infer-xtimes 10 --infer-max-sample-size 160000 --output-path $odir
"""
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import metrics, progress_bar
from fairseq import checkpoint_utils, data, options, tasks
from fairseq.data import FileAudioDataset, AddTargetDataset, Dictionary
from fairseq.tasks.audio_classification import LabelEncoder
import ipdb
import copy
import sys
from tqdm import tqdm
import tempfile
import numpy as np
import sklearn
def subset_manifest(infer_manifest, veri_pair):
with open(infer_manifest) as ff, open(veri_pair) as gg, \
tempfile.NamedTemporaryFile('w', delete=False) as ww:
fnames = ff.read().strip().split("\n")
basedir = fnames[0]
needed_fname = []
for gi in gg.read().strip().split('\n'):
_, x1, x2 = gi.split()
needed_fname.append(x1)
needed_fname.append(x2)
needed_fname = set(needed_fname)
ww.write(basedir+'\n')
for ii in range(1, len(fnames)):
x1,x2 = fnames[ii].split()
if x1 in needed_fname:
ww.write(fnames[ii]+'\n')
print(f'| subset manifest for verification: {ww.name}')
return ww.name
def wrap_target_dataset(infer_manifest, dataset, task):
label_path = infer_manifest.replace(".tsv", ".label")
with open(label_path, "r") as f:
labels = f.read().strip().split("\n")
assert len(labels) == len(dataset)
process_label = LabelEncoder(task.target_dictionary)
dataset = AddTargetDataset(dataset, labels,
pad=task.target_dictionary.pad(),
eos=task.target_dictionary.eos(),
batch_targets=True,
process_label=process_label,
add_to_input=False)
return dataset
def resample_data(source, padding_mask, n_sample, max_sample_len):
# source: BxT
# padding_mask: BxT
B = source.shape[0]
T = source.shape[1]
sources = []
padding_masks = []
seq_len = (~padding_mask).sum(1)
for jj in range(n_sample):
new_source = source.new_zeros(B, max_sample_len)
new_padding_mask = padding_mask.new_zeros(B, max_sample_len)
for ii in range(B):
if seq_len[ii] > max_sample_len:
start = np.random.randint(0, seq_len[ii]-max_sample_len+1)
end = start + max_sample_len
else :
start = 0
end = seq_len[ii]
new_source[ii, 0:end-start] = source[ii, start:end]
new_padding_mask[ii, end-start+1:] = True
sources.append(new_source)
padding_masks.append(new_padding_mask)
return sources, padding_masks
def resample_sample(sample, n_sample, max_sample_len):
new_sources, new_padding_masks = resample_data(sample['net_input']['source'], sample['net_input']['padding_mask'], n_sample, max_sample_len)
new_samples = []
for ii in range(n_sample):
new_sample = copy.deepcopy(sample)
new_sample['net_input']['source'] = new_sources[ii]
new_sample['net_input']['padding_mask'] = new_padding_masks[ii]
new_samples.append(new_sample)
return new_samples
if __name__ == '__main__':
np.random.seed(123)
# Parse command-line arguments for generation
parser = options.get_generation_parser(default_task='audio_classification')
# parser.add_argument('--infer-merge', type=str, default='mean')
parser.add_argument('--infer-xtimes', type=int, default=1)
parser.add_argument('--infer-max-sample-size', type=int, default=5*16000) # 5 secs
parser.add_argument('--infer-manifest', type=str)
parser.add_argument('--verification-pair', type=str, required=False,
help='''
a file that contains pairs of utterances to be evaluated on whether they are from the same speaker or not
format: (following voxceleb)
1/0 <wav_pair_a> <wav_pair_b>
''')
parser.add_argument('--output-path', type=str)
# parser.add_argument('--infer-xtimes', type=int, default=1)
args = options.parse_args_and_arch(parser)
# Setup task
# task = tasks.setup_task(args)
use_cuda = not args.cpu
# Load model & task
print('| loading model from {}'.format(args.path))
arg_overrides = {
'data': args.data,
# 'mask_prob': 0
#'max_sample_size': sys.maxsize,
#'min_sample_size': 0,
}
state = checkpoint_utils.load_checkpoint_to_cpu(args.path)
# move to AWS
state['cfg']['model']['w2v_path'] = state['cfg']['model']['w2v_path'].replace('/checkpoint/arbabu/XLSR2/model_versions/', '/fsx/data/model_versions/').replace('/checkpoint/kushall/final_model_checkpoints/wav2vec2/', '/fsx/data/wav2vec_ckpt/')
state['cfg']['task']['data'] = state['cfg']['task']['data'].replace('/checkpoint/kushall/data/', '/fsx/data/')
models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task([args.path],
arg_overrides=arg_overrides,
task=None,
state=state)
model = models[0]
model.eval()
if use_cuda:
model.cuda()
# Load dataset
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
infer_manifest = args.infer_manifest
# only decode needed utts
# infer_manifest = subset_manifest(infer_manifest,
# args.verification_pair)
infer_dataset = FileAudioDataset(infer_manifest,
sample_rate=task.cfg.sample_rate,
max_sample_size=10**10, #task.cfg.max_sample_size,
min_sample_size=1, #task.cfg.min_sample_size,
pad=True,
normalize=task.cfg.normalize)
# add target (if needed)
infer_dataset = wrap_target_dataset(infer_manifest, infer_dataset, task)
itr = task.get_batch_iterator(
dataset=infer_dataset,
max_sentences=args.batch_size,
).next_epoch_itr(shuffle=False)
# correct = 0
# total = 0
list_uttname = []
list_latent = []
list_logit = []
list_target = []
list_src_len = []
with torch.no_grad():
for _, sample in tqdm(enumerate(itr)):
# resample if needed
samples = resample_sample(sample, args.infer_xtimes, args.infer_max_sample_size)
list_uttname.extend(sample['name'])
list_target.extend(sample['target'][:, 0].cpu().numpy())
list_src_len.extend((~sample['net_input']['padding_mask']).sum(1).cpu().numpy())
latents = []
logits = []
for sample in samples:
sample = utils.move_to_cuda(sample) if use_cuda else sample
try:
latent = model.forward_latent(**sample['net_input'])
latents.append(latent.detach().cpu().numpy())
except Exception:
# model may not expose forward_latent; skip latent extraction for this batch
latent = None
logit = model.forward(**sample['net_input'])
logits.append(logit.detach().cpu().numpy())
if len(latents) > 0:
latents = np.stack(latents, 1) # B,X,D
logits = np.stack(logits, 1) # B,X,Cls
list_latent.extend(latents)
list_logit.extend(logits)
# create big npz
list_uttname = np.array(list_uttname)
list_latent = np.array(list_latent)
list_target = np.array(list_target)
list_logit = np.array(list_logit)
list_src_len = np.array(list_src_len)
# save to npz
output_path = args.output_path
if (output_path is None):
output_path = tempfile.NamedTemporaryFile('wb', delete=False).name
with open(output_path, 'wb') as ww:
np.savez(ww, name=list_uttname,
latent=list_latent,
target=list_target,
logit=list_logit,
src_len=list_src_len)
print("="*10 + " REPORT " + "="*10)
print(f'| latent saved in {output_path}')
print(f'| {list_uttname.shape=}, {list_latent.shape=}, {list_target.shape=}, {list_logit.shape=}, {list_src_len.shape=}')
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/xlsr/scripts/gen_audio_embedding.py |
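A sketch of reading back the npz this script writes; the array names match the np.savez call above, while the path is a placeholder for whatever was passed as --output-path:
# Sketch: inspect the npz written by gen_audio_embedding.py.
import numpy as np

data = np.load("embeddings.npz")   # placeholder for --output-path
print(data["name"].shape)          # (num_utts,)
print(data["logit"].shape)         # (num_utts, infer_xtimes, num_classes)
print(data["latent"].shape)        # (num_utts, infer_xtimes, latent_dim), if latents were collected
print(data["target"].shape, data["src_len"].shape)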
"""
Usage:
This script evaluates the classification accuracy / error rate from the embeddings extracted
by gen_audio_embedding.py
Example (LID classification)
PYTHONPATH='.' python examples/wav2vec/eval_speaker_clf_task.py \
--data /fsx/androstj/exps/lid_voxlingua/infer/atj_xlsr2_100pct_300M_mean_fast_upd_100k_new.npz \
--task cls --merge mean_logit
"""
import numpy as np
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import ipdb
import logging
import argparse
from scipy.special import softmax
log=logging.getLogger(__name__)
log.setLevel(logging.INFO)
def calculate_eer(y_label, y_score):
# y_label denotes the groundtruth labels,
# y_score denotes the prediction scores.
from scipy.optimize import brentq
from sklearn.metrics import roc_curve
from scipy.interpolate import interp1d
fpr, tpr, thresholds = roc_curve(y_label, y_score, pos_label=1)
eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
optimal_threshold = interp1d(fpr, thresholds)(eer)
return eer, optimal_threshold
def calculate_minDCF(y_label, y_score, p_target=0.01, c_miss=1, c_fa=1):
# https://github.com/kaldi-asr/kaldi/blob/master/egs/sre08/v1/sid/compute_min_dcf.py
from sklearn.metrics import det_curve
fpr, fnr, thresholds = det_curve(y_label, y_score, pos_label=1)
min_c_det = float("inf")
min_c_det_threshold = thresholds[0]
for i in range(0, len(fpr)):
# See Equation (2). it is a weighted sum of false negative
# and false positive errors.
c_det = c_miss * fnr[i] * p_target + c_fa * fpr[i] * (1 - p_target)
if c_det < min_c_det:
min_c_det = c_det
min_c_det_threshold = thresholds[i]
# See Equations (3) and (4). Now we normalize the cost.
c_def = min(c_miss * p_target, c_fa * (1 - p_target))
min_dcf = min_c_det / c_def
return min_dcf, min_c_det_threshold
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', help='npz contains name & latent file')
parser.add_argument('--task', choices=['cls', 'veri', 'cls_voxlingua'])
parser.add_argument('--merge', choices=['mean_logit', 'first_logit', 'mean_latent_sim', 'first_latent_sim', 'mean_logit_sim', 'first_logit_sim'])
parser.add_argument('--veri-pair', help='verification file contains 1/0 utt_x utt_y')
parser.add_argument('--scaler', type=str, choices=['mean_var'])
parser.add_argument('--compress-method', choices=['pca'])
parser.add_argument('--compress-dim', type=int)
args = parser.parse_args()
if args.task in ['cls', 'cls_voxlingua']:
print('| run classification evaluation')
data = np.load(args.data)
data_logit = data['logit']
data_target = data['target']
data_src_len = data['src_len']
assert data_logit.shape[0] == data_target.shape[0]
B = data_logit.shape[0]
correct = 0
total = 0
data_prob = softmax(data_logit, axis=2)
correct_vs_len = np.empty((B, 2))
for ii in range(B):
_target = data_target[ii]
if args.merge == 'mean_logit':
_prob = np.mean(data_prob[ii], axis=0)
top_1 = np.argmax(_prob)
elif args.merge == 'first_logit':
_prob = data_prob[ii][0]
top_1 = np.argmax(_prob)
else :
raise ValueError()
is_top_1 = (1 if top_1 == _target else 0)
correct += is_top_1
total += 1
_src_len = data_src_len[ii] / 16000
correct_vs_len[ii] = [is_top_1, _src_len]
acc = correct / total * 100
t_5 = correct_vs_len[:, 1] <= 5
t_20 = correct_vs_len[:, 1] > 5
c_5 = correct_vs_len[t_5, 0].sum()
c_20 = correct_vs_len[t_20, 0].sum()
t_5 = t_5.sum()
t_20 = t_20.sum()
acc_5 = c_5 / t_5 * 100
acc_20 = c_20 / t_20 * 100
print(f'| acc = {acc:.2f}% -- err = {100-acc:.2f}% -- {correct=} {total=}')
print(f'| acc 0to5 = {acc_5:.2f}% -- err = {100-acc_5:.2f}% -- {c_5=} {t_5=}')
print(f'| acc 5to20 = {acc_20:.2f}% -- err = {100-acc_20:.2f}% -- {c_20=} {t_20=}')
if args.task == 'veri':
print('| run verification evaluation')
veri_pairs = []
with open(args.veri_pair) as ff:
for fi in ff:
a,b,c = fi.split()
a = int(a)
veri_pairs.append([a,b,c])
data = np.load(args.data)
if 'logit' in args.merge:
data_latent = data['logit']
elif 'latent' in args.merge:
data_latent = data['latent']
else :
raise ValueError()
data_name = data['name']
assert len(data_name) == len(data_latent)
map_name_latent = {}
from sklearn.pipeline import make_pipeline
pipe = []
if args.scaler == 'mean_var':
print(f'| apply StandardScaler')
pipe.append(StandardScaler())
if args.compress_method == 'pca':
n_comp = args.compress_dim
print(f'| apply PCA with {n_comp=}')
from sklearn.decomposition import PCA
pipe.append(PCA(n_components=n_comp))
if len(pipe) > 0 :
pipe = make_pipeline(*pipe)
data_latent_2d = data_latent.reshape(-1, data_latent.shape[-1])
pipe.fit(data_latent_2d)
data_latent_2d = pipe.transform(data_latent_2d)
data_latent = data_latent_2d.reshape(data_latent.shape[0], data_latent.shape[1], -1)
for ii in range(len(data_name)):
map_name_latent[data_name[ii]] = data_latent[ii]
labels = []
scores = []
for lbl, pair_a, pair_b in tqdm(veri_pairs):
labels.append(lbl)
pair_a = map_name_latent[pair_a]
pair_b = map_name_latent[pair_b]
assert pair_a.ndim == pair_b.ndim == 2
score = cosine_similarity(pair_a, pair_b)
if args.merge.startswith('mean'):
score = np.mean(score)
elif args.merge.startswith('first'):
score = score[0, 0]
else :
raise ValueError()
scores.append(score)
labels = np.array(labels)
scores = np.array(scores)
eer, eer_threshold = calculate_eer(labels, scores)
minDCF, minDCF_threshold = calculate_minDCF(labels, scores)
print('='*40)
print(f'| EER = {eer*100:.2f}%\tthreshold = {eer_threshold:.2f}')
print(f'| minDCF = {minDCF:.2f}\tthreshold = {minDCF_threshold:.2f}')
| EXA-1-master | exa/libraries/fairseq/examples/wav2vec/xlsr/scripts/eval_speaker_clf_task.py |
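The mean_logit merge used above averages the per-resample probabilities before taking the argmax; a small sketch of that step on a toy (utts, resamples, classes) logit array (shapes and values are illustrative):
# Sketch of the mean_logit merge: softmax per resample, average, then argmax.
import numpy as np
from scipy.special import softmax

data_logit = np.random.randn(4, 10, 107)    # (utts, resamples, classes), illustrative
data_prob = softmax(data_logit, axis=2)

preds = data_prob.mean(axis=1).argmax(axis=1)
print(preds.shape)                          # (4,)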
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
from fairseq.data.encoders.gpt2_bpe import get_encoder
def main():
"""
Helper script to encode raw text with the GPT-2 BPE using multiple processes.
The encoder.json and vocab.bpe files can be obtained here:
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--encoder-json",
help="path to encoder.json",
)
parser.add_argument(
"--vocab-bpe",
type=str,
help="path to vocab.bpe",
)
parser.add_argument(
"--inputs",
nargs="+",
default=["-"],
help="input files to filter/encode",
)
parser.add_argument(
"--outputs",
nargs="+",
default=["-"],
help="path to save encoded outputs",
)
parser.add_argument(
"--keep-empty",
action="store_true",
help="keep empty lines",
)
parser.add_argument("--workers", type=int, default=20)
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
encoder = MultiprocessingEncoder(args)
pool = Pool(args.workers, initializer=encoder.initializer)
encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100)
stats = Counter()
for i, (filt, enc_lines) in enumerate(encoded_lines, start=1):
if filt == "PASS":
for enc_line, output_h in zip(enc_lines, outputs):
print(enc_line, file=output_h)
else:
stats["num_filtered_" + filt] += 1
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
for k, v in stats.most_common():
print("[{}] filtered {} lines".format(k, v), file=sys.stderr)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)
def encode(self, line):
global bpe
ids = bpe.encode(line)
return list(map(str, ids))
def decode(self, tokens):
global bpe
return bpe.decode(tokens)
def encode_lines(self, lines):
"""
Encode a set of lines. All lines will be encoded together.
"""
enc_lines = []
for line in lines:
line = line.strip()
if len(line) == 0 and not self.args.keep_empty:
return ["EMPTY", None]
tokens = self.encode(line)
enc_lines.append(" ".join(tokens))
return ["PASS", enc_lines]
def decode_lines(self, lines):
dec_lines = []
for line in lines:
tokens = map(int, line.strip().split())
dec_lines.append(self.decode(tokens))
return ["PASS", dec_lines]
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/roberta/multiprocessing_bpe_encoder.py |
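A sketch of a single-process round trip with the same GPT-2 BPE, assuming encoder.json and vocab.bpe have been downloaded from the URLs listed in the docstring above:
# Sketch: encode/decode one line with the GPT-2 BPE used by the script.
# Assumes encoder.json and vocab.bpe were downloaded from the URLs in the docstring.
from fairseq.data.encoders.gpt2_bpe import get_encoder

bpe = get_encoder("encoder.json", "vocab.bpe")
ids = bpe.encode("Hello world!")
print(ids)               # list of BPE token ids
print(bpe.decode(ids))   # Hello world!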
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import re
class InputExample:
def __init__(self, paragraph, qa_list, label):
self.paragraph = paragraph
self.qa_list = qa_list
self.label = label
def get_examples(data_dir, set_type):
"""
Extract paragraph and question-answer list from each json file
"""
examples = []
levels = ["middle", "high"]
set_type_c = set_type.split("-")
if len(set_type_c) == 2:
levels = [set_type_c[1]]
set_type = set_type_c[0]
for level in levels:
cur_dir = os.path.join(data_dir, set_type, level)
for filename in os.listdir(cur_dir):
cur_path = os.path.join(cur_dir, filename)
with open(cur_path, "r") as f:
cur_data = json.load(f)
answers = cur_data["answers"]
options = cur_data["options"]
questions = cur_data["questions"]
context = cur_data["article"].replace("\n", " ")
context = re.sub(r"\s+", " ", context)
for i in range(len(answers)):
label = ord(answers[i]) - ord("A")
qa_list = []
question = questions[i]
for j in range(4):
option = options[i][j]
if "_" in question:
qa_cat = question.replace("_", option)
else:
qa_cat = " ".join([question, option])
qa_cat = re.sub(r"\s+", " ", qa_cat)
qa_list.append(qa_cat)
examples.append(InputExample(context, qa_list, label))
return examples
def main():
"""
Helper script to extract paragraphs, questions and answers from the RACE dataset.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-dir",
help="input directory for downloaded RACE dataset",
)
parser.add_argument(
"--output-dir",
help="output directory for extracted data",
)
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
for set_type in ["train", "dev", "test-middle", "test-high"]:
examples = get_examples(args.input_dir, set_type)
qa_file_paths = [
os.path.join(args.output_dir, set_type + ".input" + str(i + 1))
for i in range(4)
]
qa_files = [open(qa_file_path, "w") for qa_file_path in qa_file_paths]
outf_context_path = os.path.join(args.output_dir, set_type + ".input0")
outf_label_path = os.path.join(args.output_dir, set_type + ".label")
outf_context = open(outf_context_path, "w")
outf_label = open(outf_label_path, "w")
for example in examples:
outf_context.write(example.paragraph + "\n")
for i in range(4):
qa_files[i].write(example.qa_list[i] + "\n")
outf_label.write(str(example.label) + "\n")
for f in qa_files:
f.close()
outf_label.close()
outf_context.close()
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/roberta/preprocess_RACE.py |
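For each split the script writes `.input0` (the passage), `.input1` to `.input4` (question joined with each option) and `.label` (0-3); a sketch of reading one example back from those parallel files (the output directory name is a placeholder):
# Sketch: read the first extracted RACE example back from the parallel output files.
# Assumes --output-dir RACE-processed and the "train" split; adjust paths as needed.
import os

out_dir = "RACE-processed"
with open(os.path.join(out_dir, "train.input0")) as f:
    context = f.readline().rstrip()
candidates = []
for i in range(1, 5):
    with open(os.path.join(out_dir, f"train.input{i}")) as f:
        candidates.append(f.readline().rstrip())
with open(os.path.join(out_dir, "train.label")) as f:
    label = int(f.readline())

print(context[:80], "...")
print("correct candidate:", candidates[label])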
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from functools import lru_cache
def convert_sentence_to_json(sentence):
if "_" in sentence:
prefix, rest = sentence.split("_", 1)
query, rest = rest.split("_", 1)
query_index = len(prefix.rstrip().split(" "))
else:
query, query_index = None, None
prefix, rest = sentence.split("[", 1)
pronoun, rest = rest.split("]", 1)
pronoun_index = len(prefix.rstrip().split(" "))
sentence = sentence.replace("_", "").replace("[", "").replace("]", "")
return {
"idx": 0,
"text": sentence,
"target": {
"span1_index": query_index,
"span1_text": query,
"span2_index": pronoun_index,
"span2_text": pronoun,
},
}
def extended_noun_chunks(sentence):
noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks}
np_start, cur_np = 0, "NONE"
for i, token in enumerate(sentence):
np_type = token.pos_ if token.pos_ in {"NOUN", "PROPN"} else "NONE"
if np_type != cur_np:
if cur_np != "NONE":
noun_chunks.add((np_start, i))
if np_type != "NONE":
np_start = i
cur_np = np_type
if cur_np != "NONE":
noun_chunks.add((np_start, len(sentence)))
return [sentence[s:e] for (s, e) in sorted(noun_chunks)]
def find_token(sentence, start_pos):
found_tok = None
for tok in sentence:
if tok.idx == start_pos:
found_tok = tok
break
return found_tok
def find_span(sentence, search_text, start=0):
search_text = search_text.lower()
for tok in sentence[start:]:
remainder = sentence[tok.i :].text.lower()
if remainder.startswith(search_text):
len_to_consume = len(search_text)
start_idx = tok.idx
for next_tok in sentence[tok.i :]:
end_idx = next_tok.idx + len(next_tok.text)
if end_idx - start_idx == len_to_consume:
span = sentence[tok.i : next_tok.i + 1]
return span
return None
@lru_cache(maxsize=1)
def get_detokenizer():
from sacremoses import MosesDetokenizer
detok = MosesDetokenizer(lang="en")
return detok
@lru_cache(maxsize=1)
def get_spacy_nlp():
import en_core_web_lg
nlp = en_core_web_lg.load()
return nlp
def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False):
detok = get_detokenizer()
nlp = get_spacy_nlp()
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
if positive_only and "label" in sample and not sample["label"]:
# only consider examples where the query is correct
continue
target = sample["target"]
# clean up the query
query = target["span1_text"]
if query is not None:
if "\n" in query:
continue
if query.endswith(".") or query.endswith(","):
query = query[:-1]
# split tokens
tokens = sample["text"].split(" ")
def strip_pronoun(x):
return x.rstrip('.,"')
# find the pronoun
pronoun_idx = target["span2_index"]
pronoun = strip_pronoun(target["span2_text"])
if strip_pronoun(tokens[pronoun_idx]) != pronoun:
# hack: sometimes the index is misaligned
if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun:
pronoun_idx += 1
else:
raise Exception("Misaligned pronoun!")
assert strip_pronoun(tokens[pronoun_idx]) == pronoun
# split tokens before and after the pronoun
before = tokens[:pronoun_idx]
after = tokens[pronoun_idx + 1 :]
# the GPT BPE attaches leading spaces to tokens, so we keep track
# of whether we need spaces before or after the pronoun
leading_space = " " if pronoun_idx > 0 else ""
trailing_space = " " if len(after) > 0 else ""
# detokenize
before = detok.detokenize(before, return_str=True)
pronoun = detok.detokenize([pronoun], return_str=True)
after = detok.detokenize(after, return_str=True)
# hack: when the pronoun ends in a period (or comma), move the
# punctuation to the "after" part
if pronoun.endswith(".") or pronoun.endswith(","):
after = pronoun[-1] + trailing_space + after
pronoun = pronoun[:-1]
# hack: when the "after" part begins with a comma or period, remove
# the trailing space
if after.startswith(".") or after.startswith(","):
trailing_space = ""
# parse sentence with spacy
sentence = nlp(before + leading_space + pronoun + trailing_space + after)
# find pronoun span
start = len(before + leading_space)
first_pronoun_tok = find_token(sentence, start_pos=start)
pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i)
assert pronoun_span.text == pronoun
if eval:
# convert to format where pronoun is surrounded by "[]" and
# query is surrounded by "_"
query_span = find_span(sentence, query)
query_with_ws = "_{}_{}".format(
query_span.text,
(" " if query_span.text_with_ws.endswith(" ") else ""),
)
pronoun_with_ws = "[{}]{}".format(
pronoun_span.text,
(" " if pronoun_span.text_with_ws.endswith(" ") else ""),
)
if query_span.start < pronoun_span.start:
first = (query_span, query_with_ws)
second = (pronoun_span, pronoun_with_ws)
else:
first = (pronoun_span, pronoun_with_ws)
second = (query_span, query_with_ws)
sentence = (
sentence[: first[0].start].text_with_ws
+ first[1]
+ sentence[first[0].end : second[0].start].text_with_ws
+ second[1]
+ sentence[second[0].end :].text
)
yield sentence, sample.get("label", None)
else:
yield sentence, pronoun_span, query, sample.get("label", None)
def winogrande_jsonl_iterator(input_fname, eval=False):
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
sentence, option1, option2 = (
sample["sentence"],
sample["option1"],
sample["option2"],
)
pronoun_span = (sentence.index("_"), sentence.index("_") + 1)
if eval:
query, cand = option1, option2
else:
query = option1 if sample["answer"] == "1" else option2
cand = option2 if sample["answer"] == "1" else option1
yield sentence, pronoun_span, query, cand
def filter_noun_chunks(
chunks, exclude_pronouns=False, exclude_query=None, exact_match=False
):
if exclude_pronouns:
chunks = [
np
for np in chunks
if (np.lemma_ != "-PRON-" and not all(tok.pos_ == "PRON" for tok in np))
]
if exclude_query is not None:
excl_txt = [exclude_query.lower()]
filtered_chunks = []
for chunk in chunks:
lower_chunk = chunk.text.lower()
found = False
for excl in excl_txt:
if (
not exact_match and (lower_chunk in excl or excl in lower_chunk)
) or lower_chunk == excl:
found = True
break
if not found:
filtered_chunks.append(chunk)
chunks = filtered_chunks
return chunks
| EXA-1-master | exa/libraries/fairseq/examples/roberta/wsc/wsc_utils.py |
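convert_sentence_to_json expects the query wrapped in underscores and the pronoun in square brackets; a small illustration of the output format (the sentence is a standard Winograd-style example):
# Sketch: what convert_sentence_to_json produces for a marked-up WSC sentence.
from examples.roberta.wsc.wsc_utils import convert_sentence_to_json   # assumes running from the fairseq repo root

s = "The _city councilmen_ refused the demonstrators a permit because [they] feared violence."
print(convert_sentence_to_json(s))
# {'idx': 0,
#  'text': 'The city councilmen refused the demonstrators a permit because they feared violence.',
#  'target': {'span1_index': 1, 'span1_text': 'city councilmen',
#             'span2_index': 9, 'span2_text': 'they'}}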
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from fairseq.data import encoders
@register_criterion("wsc")
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, "w")
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args.bpe)
self.tokenizer = encoders.build_tokenizer(args.tokenizer)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0)
parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0)
parser.add_argument(
"--wsc-cross-entropy",
action="store_true",
help="use cross entropy formulation instead of margin loss",
)
parser.add_argument(
"--save-predictions", metavar="FILE", help="file to save predictions to"
)
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
-query_lprobs
+ self.args.wsc_margin_alpha
* (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0)
).sum()
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0.0, 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample["labels"]):
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"][i].unsqueeze(0),
sample["query_masks"][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"][i],
sample["candidate_masks"][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample["id"][i].item()
if self.prediction_h is not None:
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
nqueries = sum(log.get("nqueries", 0) for log in logging_outputs)
if nqueries > 0:
agg_output["accuracy"] = ncorrect / float(nqueries)
return agg_output
@register_criterion("winogrande")
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"],
sample["query_masks"],
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"],
sample["candidate_masks"],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample["query_tokens"].size(0)
ncorrect = pred.sum().item()
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": sample_size,
}
return loss, sample_size, logging_output
| EXA-1-master | exa/libraries/fairseq/examples/roberta/wsc/wsc_criterion.py |
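The margin formulation in get_loss penalises any candidate whose score comes within --wsc-margin-beta of the query score, scaled by --wsc-margin-alpha; a sketch of that expression on toy log-probabilities (values are made up, alpha/beta are the CLI defaults):
# Sketch of the WSC margin loss on toy scores.
import torch

query_lprobs = torch.tensor([-1.0])
cand_lprobs = torch.tensor([-1.2, -0.8])   # one easy negative, one hard negative
alpha, beta = 1.0, 0.0                      # CLI defaults

loss = (-query_lprobs
        + alpha * (cand_lprobs - query_lprobs + beta).clamp(min=0)).sum()
print(loss)   # tensor(2.2000); only the hard negative adds a margin term (0.2)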
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import wsc_criterion # noqa
from . import wsc_task # noqa
| EXA-1-master | exa/libraries/fairseq/examples/roberta/wsc/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
from . import wsc_utils
@register_task("wsc")
class WSCTask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Winograd Schemas."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
# hack to handle GPT-2 BPE, which includes leading spaces
if args.bpe == "gpt2":
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "wsc", "Must set --criterion=wsc"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool = False):
if self.tokenizer is not None:
s = self.tokenizer.encode(s)
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=append_eos,
add_if_not_exist=False,
).long()
if self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start : mask_start + mask_size] = 1
return toks, mask
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[: pronoun_span.start].text
suffix = sentence[pronoun_span.end :].text_with_ws
# spaCy spans include trailing spaces, but we need to know about
# leading spaces for the GPT-2 BPE
leading_space = (
" " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else ""
)
trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else ""
# get noun phrases, excluding pronouns and anything overlapping with the query
cand_spans = wsc_utils.filter_noun_chunks(
wsc_utils.extended_noun_chunks(sentence),
exclude_pronouns=True,
exclude_query=query,
exact_match=False,
)
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_masks = [], []
for cand_span in cand_spans:
toks, mask = self.binarize_with_mask(
cand_span.text,
prefix,
suffix,
leading_space,
trailing_space,
)
cand_toks.append(toks)
cand_masks.append(mask)
# collate candidates
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert cand_toks.size() == cand_masks.size()
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, [1] * len(labels))
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"labels": labels,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + "\n").encode("utf-8"))
dataset = self.load_dataset(
"disambiguate_pronoun",
data_path=h.name,
return_only=True,
)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
logits, _ = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
cand_lprobs = get_lprobs(
sample["candidate_tokens"][0],
sample["candidate_masks"][0],
)
if sample["query_tokens"][0] is not None:
query_lprobs = get_lprobs(
sample["query_tokens"][0].unsqueeze(0),
sample["query_masks"][0].unsqueeze(0),
)
return (query_lprobs >= cand_lprobs).all().item() == 1
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample["candidate_tokens"][0][best_idx]
mask = sample["candidate_masks"][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
@register_task("winogrande")
class WinograndeTask(WSCTask):
"""
Task for WinoGrande dataset. Efficient implementation for Winograd schema
tasks with exactly two candidates, one of which is correct.
"""
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "winogrande", "Must set --criterion=winogrande"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test"))
for sample in itr:
sentence, pronoun_span, query, cand_text = sample
prefix = sentence[: pronoun_span[0]].rstrip()
suffix = sentence[pronoun_span[1] :]
leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else ""
trailing_space = ""
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query,
prefix,
suffix,
leading_space,
trailing_space,
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_mask = self.binarize_with_mask(
cand_text,
prefix,
suffix,
leading_space,
trailing_space,
)
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_mask)
candidate_lengths.append(cand_toks.size(0))
query_lengths = np.array(query_lengths)
def get_pad_dataset_fn(tokens, length, pad_idx):
return PadDataset(
ListDataset(tokens, length),
pad_idx=pad_idx,
left_pad=False,
)
query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = get_pad_dataset_fn(
candidate_tokens, candidate_lengths, self.vocab.pad()
)
candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
| EXA-1-master | exa/libraries/fairseq/examples/roberta/wsc/wsc_task.py |
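Once a model has been fine-tuned with this task, disambiguate_pronoun can be called through the RoBERTa hub interface; a hedged sketch following the fairseq WSC example, where the checkpoint and data directory names are placeholders:
# Sketch: pronoun disambiguation with a fine-tuned WSC checkpoint (paths are placeholders).
from fairseq.models.roberta import RobertaModel
from examples.roberta.wsc import wsc_utils   # noqa: also registers the WSC task and criterion

roberta = RobertaModel.from_pretrained("checkpoints", "checkpoint_best.pt", "WSC/")
roberta.cuda()
roberta.eval()
print(roberta.disambiguate_pronoun(
    "The _trophy_ would not fit in the brown suitcase because [it] was too big."
))   # True for a well-trained model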
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import commonsense_qa_task # noqa
| EXA-1-master | exa/libraries/fairseq/examples/roberta/commonsense_qa/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
@register_task("commonsense_qa")
class CommonsenseQATask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Commonsense QA."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
parser.add_argument("--num-classes", type=int, default=5)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (
args.criterion == "sentence_ranking"
), "Must set --criterion=sentence_ranking"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def binarize(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=True,
add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
src_tokens = [[] for i in range(self.args.num_classes)]
src_lengths = [[] for i in range(self.args.num_classes)]
labels = []
with open(data_path) as h:
for line in h:
example = json.loads(line.strip())
if "answerKey" in example:
label = ord(example["answerKey"]) - ord("A")
labels.append(label)
question = example["question"]["stem"]
assert len(example["question"]["choices"]) == self.args.num_classes
# format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
question = "Q: " + question
question_toks = binarize(question, append_bos=True)
for i, choice in enumerate(example["question"]["choices"]):
src = "A: " + choice["text"]
src_bin = torch.cat([question_toks, binarize(src)])
src_tokens[i].append(src_bin)
src_lengths[i].append(len(src_bin))
assert all(
len(src_tokens[0]) == len(src_tokens[i])
for i in range(self.args.num_classes)
)
assert len(src_tokens[0]) == len(src_lengths[0])
assert len(labels) == 0 or len(labels) == len(src_tokens[0])
for i in range(self.args.num_classes):
src_lengths[i] = np.array(src_lengths[i])
src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
src_lengths[i] = ListDataset(src_lengths[i])
dataset = {
"id": IdDataset(),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens[0], reduce=True),
}
for i in range(self.args.num_classes):
dataset.update(
{
"net_input{}".format(i + 1): {
"src_tokens": RightPadDataset(
src_tokens[i],
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths[i],
}
}
)
if len(labels) > 0:
dataset.update({"target": RawLabelDataset(labels)})
dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
with data_utils.numpy_seed(self.args.seed):
dataset = SortDataset(
dataset,
# shuffle
sort_order=[np.random.permutation(len(dataset))],
)
print("| Loaded {} with {} samples".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args, from_checkpoint=False):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
"sentence_classification_head",
num_classes=1,
)
return model
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
| EXA-1-master | exa/libraries/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fileinput
import sacremoses
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("files", nargs="*", help="input files")
args = parser.parse_args()
detok = sacremoses.MosesDetokenizer()
for line in fileinput.input(args.files, openhook=fileinput.hook_compressed):
print(
detok.detokenize(line.strip().split(" "))
.replace(" @", "")
.replace("@ ", "")
.replace(" =", "=")
.replace("= ", "=")
.replace(" – ", "–")
)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/megatron_11b/detok.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import logging
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import soundfile as sf
import sys
import torch
import torchaudio
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.tasks.text_to_speech import plot_tts_output
from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def make_parser():
parser = options.get_speech_generation_parser()
parser.add_argument("--dump-features", action="store_true")
parser.add_argument("--dump-waveforms", action="store_true")
parser.add_argument("--dump-attentions", action="store_true")
parser.add_argument("--dump-eos-probs", action="store_true")
parser.add_argument("--dump-plots", action="store_true")
parser.add_argument("--dump-target", action="store_true")
parser.add_argument("--output-sample-rate", default=22050, type=int)
parser.add_argument("--teacher-forcing", action="store_true")
parser.add_argument(
"--audio-format", type=str, default="wav", choices=["wav", "flac"]
)
return parser
def postprocess_results(
dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target
):
def to_np(x):
return None if x is None else x.detach().cpu().numpy()
sample_ids = [dataset.ids[i] for i in sample["id"].tolist()]
texts = sample["src_texts"] if "src_texts" in sample else [""] * len(hypos)
attns = [to_np(hypo["attn"]) for hypo in hypos]
eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos]
feat_preds = [to_np(hypo["feature"]) for hypo in hypos]
wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos]
if dump_target:
feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos]
wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos]
else:
feat_targs = [None for _ in hypos]
wave_targs = [None for _ in hypos]
return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds,
feat_targs, wave_targs)
def dump_result(
is_na_model,
args,
vocoder,
sample_id,
text,
attn,
eos_prob,
feat_pred,
wave_pred,
feat_targ,
wave_targ,
):
sample_rate = args.output_sample_rate
out_root = Path(args.results_path)
if args.dump_features:
feat_dir = out_root / "feat"
feat_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_dir / f"{sample_id}.npy", feat_pred)
if args.dump_target:
feat_tgt_dir = out_root / "feat_tgt"
feat_tgt_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_tgt_dir / f"{sample_id}.npy", feat_targ)
if args.dump_attentions:
attn_dir = out_root / "attn"
attn_dir.mkdir(exist_ok=True, parents=True)
np.save(attn_dir / f"{sample_id}.npy", attn.numpy())
if args.dump_eos_probs and not is_na_model:
eos_dir = out_root / "eos"
eos_dir.mkdir(exist_ok=True, parents=True)
np.save(eos_dir / f"{sample_id}.npy", eos_prob)
if args.dump_plots:
images = [feat_pred.T] if is_na_model else [feat_pred.T, attn]
names = ["output"] if is_na_model else ["output", "alignment"]
if feat_targ is not None:
images = [feat_targ.T] + images
names = [f"target (idx={sample_id})"] + names
if is_na_model:
plot_tts_output(images, names, attn, "alignment", suptitle=text)
else:
plot_tts_output(images, names, eos_prob, "eos prob", suptitle=text)
plot_dir = out_root / "plot"
plot_dir.mkdir(exist_ok=True, parents=True)
plt.savefig(plot_dir / f"{sample_id}.png")
plt.close()
if args.dump_waveforms:
ext = args.audio_format
if wave_pred is not None:
wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}"
wav_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_dir / f"{sample_id}.{ext}", wave_pred, sample_rate)
if args.dump_target and wave_targ is not None:
wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt"
wav_tgt_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_tgt_dir / f"{sample_id}.{ext}", wave_targ, sample_rate)
def main(args):
assert(args.dump_features or args.dump_waveforms or args.dump_attentions
or args.dump_eos_probs or args.dump_plots)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 8000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
task = tasks.setup_task(args)
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
task=task,
arg_overrides=ast.literal_eval(args.model_overrides),
)
model = models[0].cuda() if use_cuda else models[0]
# use the original n_frames_per_step
task.args.n_frames_per_step = saved_cfg.task.n_frames_per_step
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
data_cfg = task.data_cfg
sample_rate = data_cfg.config.get("features", {}).get("sample_rate", 22050)
resample_fn = {
False: lambda x: x,
True: lambda x: torchaudio.sox_effects.apply_effects_tensor(
x.detach().cpu().unsqueeze(0), sample_rate,
[['rate', str(args.output_sample_rate)]]
)[0].squeeze(0)
}.get(args.output_sample_rate != sample_rate)
if args.output_sample_rate != sample_rate:
logger.info(f"resampling to {args.output_sample_rate}Hz")
generator = task.build_generator([model], args)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
Path(args.results_path).mkdir(exist_ok=True, parents=True)
is_na_model = getattr(model, "NON_AUTOREGRESSIVE", False)
dataset = task.dataset(args.gen_subset)
vocoder = task.args.vocoder
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
hypos = generator.generate(model, sample, has_targ=args.dump_target)
for result in postprocess_results(
dataset, sample, hypos, resample_fn, args.dump_target
):
dump_result(is_na_model, args, vocoder, *result)
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/generate_waveform.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
from pathlib import Path
from typing import Optional, List, Dict
import zipfile
import tempfile
from dataclasses import dataclass
from itertools import groupby
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_tsv_to_dicts
from fairseq.data.audio.audio_utils import (
TTSSpectrogram, TTSMelScale, parse_path, read_from_stored_zip, is_npy_data
)
def trim_or_pad_to_target_length(
data_1d_or_2d: np.ndarray, target_length: int
) -> np.ndarray:
assert len(data_1d_or_2d.shape) in {1, 2}
delta = data_1d_or_2d.shape[0] - target_length
    if delta >= 0:  # trim if longer than the target length
        data_1d_or_2d = data_1d_or_2d[: target_length]
    else:  # pad if shorter than the target length
if len(data_1d_or_2d.shape) == 1:
data_1d_or_2d = np.concatenate(
[data_1d_or_2d, np.zeros(-delta)], axis=0
)
else:
data_1d_or_2d = np.concatenate(
[data_1d_or_2d, np.zeros((-delta, data_1d_or_2d.shape[1]))],
axis=0
)
return data_1d_or_2d
def extract_logmel_spectrogram(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, win_length: int = 1024,
hop_length: int = 256, n_fft: int = 1024,
win_fn: callable = torch.hann_window, n_mels: int = 80,
f_min: float = 0., f_max: float = 8000, eps: float = 1e-5,
overwrite: bool = False, target_length: Optional[int] = None
):
if output_path is not None and output_path.is_file() and not overwrite:
return
spectrogram_transform = TTSSpectrogram(
n_fft=n_fft, win_length=win_length, hop_length=hop_length,
window_fn=win_fn
)
mel_scale_transform = TTSMelScale(
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
n_stft=n_fft // 2 + 1
)
spectrogram = spectrogram_transform(waveform)
mel_spec = mel_scale_transform(spectrogram)
logmel_spec = torch.clamp(mel_spec, min=eps).log()
assert len(logmel_spec.shape) == 3 and logmel_spec.shape[0] == 1
logmel_spec = logmel_spec.squeeze().t() # D x T -> T x D
if target_length is not None:
logmel_spec = trim_or_pad_to_target_length(logmel_spec, target_length)
if output_path is not None:
np.save(output_path.as_posix(), logmel_spec)
else:
return logmel_spec
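# Illustrative usage sketch (not part of the original file): extracting the log-mel
# spectrogram of a single mono utterance. The wav path is a placeholder assumption.
def _example_extract_logmel(wav_path: str = "sample.wav"):
    import torchaudio
    waveform, sample_rate = torchaudio.load(wav_path)  # (1, n_samples) for mono audio
    # With output_path=None the T x n_mels log-mel tensor is returned instead of saved.
    return extract_logmel_spectrogram(waveform, sample_rate, output_path=None)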
def extract_pitch(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, hop_length: int = 256,
log_scale: bool = True, phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
try:
import pyworld
except ImportError:
raise ImportError("Please install PyWORLD: pip install pyworld")
_waveform = waveform.squeeze(0).double().numpy()
pitch, t = pyworld.dio(
_waveform, sample_rate, frame_period=hop_length / sample_rate * 1000
)
pitch = pyworld.stonemask(_waveform, pitch, t, sample_rate)
if phoneme_durations is not None:
pitch = trim_or_pad_to_target_length(pitch, sum(phoneme_durations))
try:
from scipy.interpolate import interp1d
except ImportError:
raise ImportError("Please install SciPy: pip install scipy")
nonzero_ids = np.where(pitch != 0)[0]
if len(nonzero_ids) == 0:
print((f"{output_path} has all empty values in the pitch contour"))
return
elif len(nonzero_ids) == 1:
print((f"{output_path} has only one non-zero values in the pitch contour"))
return
else:
interp_fn = interp1d(
nonzero_ids,
pitch[nonzero_ids],
fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]),
bounds_error=False,
)
pitch = interp_fn(np.arange(0, len(pitch)))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
pitch = np.array(
[
np.mean(pitch[d_cumsum[i-1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(pitch) == len(phoneme_durations)
if log_scale:
pitch = np.log(pitch + 1)
if output_path is not None:
np.save(output_path.as_posix(), pitch)
else:
return pitch
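# Illustrative usage sketch (not part of the original file): frame-level pitch for one
# mono utterance; requires the optional pyworld dependency. The wav path is a placeholder.
def _example_extract_pitch(wav_path: str = "sample.wav"):
    import torchaudio
    waveform, sample_rate = torchaudio.load(wav_path)
    # Without phoneme_durations, one log-scaled F0 value per analysis frame is returned
    # (or None if the utterance has no voiced frames).
    return extract_pitch(waveform, sample_rate, output_path=None, hop_length=256)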
def extract_energy(
waveform: torch.Tensor, output_path: Optional[Path] = None,
hop_length: int = 256, n_fft: int = 1024, log_scale: bool = True,
phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
assert len(waveform.shape) == 2 and waveform.shape[0] == 1
waveform = waveform.view(1, 1, waveform.shape[1])
waveform = F.pad(
waveform.unsqueeze(1), [n_fft // 2, n_fft // 2, 0, 0],
mode="reflect"
)
waveform = waveform.squeeze(1)
fourier_basis = np.fft.fft(np.eye(n_fft))
cutoff = int((n_fft / 2 + 1))
fourier_basis = np.vstack(
[np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])]
)
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
forward_transform = F.conv1d(
waveform, forward_basis, stride=hop_length, padding=0
)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
energy = torch.norm(magnitude, dim=1).squeeze(0).numpy()
if phoneme_durations is not None:
energy = trim_or_pad_to_target_length(energy, sum(phoneme_durations))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
energy = np.array(
[
np.mean(energy[d_cumsum[i - 1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(energy) == len(phoneme_durations)
if log_scale:
energy = np.log(energy + 1)
if output_path is not None:
np.save(output_path.as_posix(), energy)
else:
return energy
def get_global_cmvn(feature_root: Path, output_path: Optional[Path] = None):
mean_x, mean_x2, n_frames = None, None, 0
feature_paths = feature_root.glob("*.npy")
for p in tqdm(feature_paths):
with open(p, 'rb') as f:
frames = np.load(f).squeeze()
n_frames += frames.shape[0]
cur_mean_x = frames.sum(axis=0)
if mean_x is None:
mean_x = cur_mean_x
else:
mean_x += cur_mean_x
cur_mean_x2 = (frames ** 2).sum(axis=0)
if mean_x2 is None:
mean_x2 = cur_mean_x2
else:
mean_x2 += cur_mean_x2
mean_x /= n_frames
mean_x2 /= n_frames
var_x = mean_x2 - mean_x ** 2
std_x = np.sqrt(np.maximum(var_x, 1e-10))
if output_path is not None:
with open(output_path, 'wb') as f:
np.savez(f, mean=mean_x, std=std_x)
else:
return {"mean": mean_x, "std": std_x}
def ipa_phonemize(text, lang="en-us", use_g2p=False):
if use_g2p:
assert lang == "en-us", "g2pE phonemizer only works for en-us"
try:
from g2p_en import G2p
g2p = G2p()
return " ".join("|" if p == " " else p for p in g2p(text))
except ImportError:
raise ImportError(
"Please install phonemizer: pip install g2p_en"
)
else:
try:
from phonemizer import phonemize
from phonemizer.separator import Separator
return phonemize(
text, backend='espeak', language=lang,
separator=Separator(word="| ", phone=" ")
)
except ImportError:
raise ImportError(
"Please install phonemizer: pip install phonemizer"
)
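# Illustrative usage sketch (not part of the original file): with the g2p_en backend the
# output is a space-separated phone sequence with "|" marking word boundaries; the optional
# g2p_en package must be installed.
def _example_phonemize(text: str = "hello world"):
    return ipa_phonemize(text, lang="en-us", use_g2p=True)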
@dataclass
class ForceAlignmentInfo(object):
tokens: List[str]
frame_durations: List[int]
start_sec: Optional[float]
end_sec: Optional[float]
def get_mfa_alignment_by_sample_id(
textgrid_zip_path: str, sample_id: str, sample_rate: int,
hop_length: int, silence_phones: List[str] = ("sil", "sp", "spn")
) -> ForceAlignmentInfo:
try:
import tgt
except ImportError:
raise ImportError("Please install TextGridTools: pip install tgt")
filename = f"{sample_id}.TextGrid"
out_root = Path(tempfile.gettempdir())
tgt_path = out_root / filename
with zipfile.ZipFile(textgrid_zip_path) as f_zip:
f_zip.extract(filename, path=out_root)
textgrid = tgt.io.read_textgrid(tgt_path.as_posix())
os.remove(tgt_path)
phones, frame_durations = [], []
start_sec, end_sec, end_idx = 0, 0, 0
for t in textgrid.get_tier_by_name("phones")._objects:
s, e, p = t.start_time, t.end_time, t.text
# Trim leading silences
if len(phones) == 0:
if p in silence_phones:
continue
else:
start_sec = s
phones.append(p)
if p not in silence_phones:
end_sec = e
end_idx = len(phones)
r = sample_rate / hop_length
frame_durations.append(int(np.round(e * r) - np.round(s * r)))
    # Trim trailing silences
phones = phones[:end_idx]
frame_durations = frame_durations[:end_idx]
return ForceAlignmentInfo(
tokens=phones, frame_durations=frame_durations, start_sec=start_sec,
end_sec=end_sec
)
def get_mfa_alignment(
textgrid_zip_path: str, sample_ids: List[str], sample_rate: int,
hop_length: int
) -> Dict[str, ForceAlignmentInfo]:
return {
i: get_mfa_alignment_by_sample_id(
textgrid_zip_path, i, sample_rate, hop_length
) for i in tqdm(sample_ids)
}
def get_unit_alignment(
id_to_unit_tsv_path: str, sample_ids: List[str]
) -> Dict[str, ForceAlignmentInfo]:
id_to_units = {
e["id"]: e["units"] for e in load_tsv_to_dicts(id_to_unit_tsv_path)
}
id_to_units = {i: id_to_units[i].split() for i in sample_ids}
id_to_units_collapsed = {
i: [uu for uu, _ in groupby(u)] for i, u in id_to_units.items()
}
id_to_durations = {
i: [len(list(g)) for _, g in groupby(u)] for i, u in id_to_units.items()
}
return {
i: ForceAlignmentInfo(
tokens=id_to_units_collapsed[i], frame_durations=id_to_durations[i],
start_sec=None, end_sec=None
)
for i in sample_ids
}
def get_feature_value_min_max(feature_paths: List[str]):
v_min, v_max = 1e-8, -1e-8
for p in tqdm(feature_paths):
_path, slice_ptr = parse_path(p)
assert len(slice_ptr) == 2
byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
assert is_npy_data(byte_data)
path_or_fp = io.BytesIO(byte_data)
features = np.load(path_or_fp).squeeze()
v_min = min(v_min, features.min().item())
v_max = max(v_max, features.max().item())
return v_min, v_max
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/data_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from scipy.interpolate import interp1d
import torchaudio
from fairseq.tasks.text_to_speech import (
batch_compute_distortion, compute_rms_dist
)
def batch_mel_spectral_distortion(
y1, y2, sr, normalize_type="path", mel_fn=None
):
"""
https://arxiv.org/pdf/2011.03568.pdf
Same as Mel Cepstral Distortion, but computed on log-mel spectrograms.
"""
if mel_fn is None or mel_fn.sample_rate != sr:
mel_fn = torchaudio.transforms.MelSpectrogram(
sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr),
hop_length=int(0.0125 * sr), f_min=20, n_mels=80,
window_fn=torch.hann_window
).to(y1[0].device)
offset = 1e-6
return batch_compute_distortion(
y1, y2, sr, lambda y: torch.log(mel_fn(y) + offset).transpose(-1, -2),
compute_rms_dist, normalize_type
)
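# Illustrative usage sketch (not part of the original file): comparing one reference and one
# synthesized waveform, mirroring how eval_sp.py calls the distortion functions. The file
# paths are placeholders.
def _example_mel_spectral_distortion(ref_path: str = "ref.wav", syn_path: str = "syn.wav"):
    yref, sr = torchaudio.load(ref_path)
    ysyn, _ = torchaudio.load(syn_path)
    # Returns a list with one (distortion, extra) tuple per utterance pair.
    return batch_mel_spectral_distortion([yref[0]], [ysyn[0]], sr)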
# This code is based on
# "https://github.com/bastibe/MAPS-Scripts/blob/master/helper.py"
def _same_t_in_true_and_est(func):
def new_func(true_t, true_f, est_t, est_f):
assert type(true_t) is np.ndarray
assert type(true_f) is np.ndarray
assert type(est_t) is np.ndarray
assert type(est_f) is np.ndarray
interpolated_f = interp1d(
est_t, est_f, bounds_error=False, kind='nearest', fill_value=0
)(true_t)
return func(true_t, true_f, true_t, interpolated_f)
return new_func
@_same_t_in_true_and_est
def gross_pitch_error(true_t, true_f, est_t, est_f):
"""The relative frequency in percent of pitch estimates that are
outside a threshold around the true pitch. Only frames that are
considered pitched by both the ground truth and the estimator (if
applicable) are considered.
"""
correct_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(gross_pitch_error_frames) / np.sum(correct_frames)
def _gross_pitch_error_frames(true_t, true_f, est_t, est_f, eps=1e-8):
voiced_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
true_f_p_eps = [x + eps for x in true_f]
pitch_error_frames = np.abs(est_f / true_f_p_eps - 1) > 0.2
return voiced_frames & pitch_error_frames
def _true_voiced_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) & (true_f != 0)
def _voicing_decision_error_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) != (true_f != 0)
@_same_t_in_true_and_est
def f0_frame_error(true_t, true_f, est_t, est_f):
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return (np.sum(gross_pitch_error_frames) +
np.sum(voicing_decision_error_frames)) / (len(true_t))
@_same_t_in_true_and_est
def voicing_decision_error(true_t, true_f, est_t, est_f):
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(voicing_decision_error_frames) / (len(true_t))
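# Illustrative usage sketch (not part of the original file): the three metrics above share
# one interface; the time axes and F0 tracks are numpy arrays and 0 Hz marks unvoiced frames.
def _example_f0_metrics(true_t, true_f, est_t, est_f):
    return {
        "gpe": gross_pitch_error(true_t, true_f, est_t, est_f),
        "vde": voicing_decision_error(true_t, true_f, est_t, est_f),
        "ffe": f0_frame_error(true_t, true_f, est_t, est_f),
    }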
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Signal processing-based evaluation using waveforms
"""
import csv
import numpy as np
import os.path as op
import torch
import tqdm
from tabulate import tabulate
import torchaudio
from examples.speech_synthesis.utils import batch_mel_spectral_distortion
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
def load_eval_spec(path):
with open(path) as f:
reader = csv.DictReader(f, delimiter='\t')
samples = list(reader)
return samples
def eval_distortion(samples, distortion_fn, device="cuda"):
nmiss = 0
results = []
for sample in tqdm.tqdm(samples):
if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
nmiss += 1
results.append(None)
continue
# assume single channel
yref, sr = torchaudio.load(sample["ref"])
ysyn, _sr = torchaudio.load(sample["syn"])
yref, ysyn = yref[0].to(device), ysyn[0].to(device)
assert sr == _sr, f"{sr} != {_sr}"
distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0]
_, _, _, _, _, pathmap = extra
nins = torch.sum(pathmap.sum(dim=1) - 1) # extra frames in syn
ndel = torch.sum(pathmap.sum(dim=0) - 1) # missing frames from syn
results.append(
(distortion.item(), # path distortion
pathmap.size(0), # yref num frames
pathmap.size(1), # ysyn num frames
pathmap.sum().item(), # path length
nins.item(), # insertion
ndel.item(), # deletion
)
)
return results
def eval_mel_cepstral_distortion(samples, device="cuda"):
return eval_distortion(samples, batch_mel_cepstral_distortion, device)
def eval_mel_spectral_distortion(samples, device="cuda"):
return eval_distortion(samples, batch_mel_spectral_distortion, device)
def print_results(results, show_bin):
results = np.array(list(filter(lambda x: x is not None, results)))
np.set_printoptions(precision=3)
def _print_result(results):
dist, dur_ref, dur_syn, dur_ali, nins, ndel = results.sum(axis=0)
res = {
"nutt": len(results),
"dist": dist,
"dur_ref": int(dur_ref),
"dur_syn": int(dur_syn),
"dur_ali": int(dur_ali),
"dist_per_ref_frm": dist/dur_ref,
"dist_per_syn_frm": dist/dur_syn,
"dist_per_ali_frm": dist/dur_ali,
"ins": nins/dur_ref,
"del": ndel/dur_ref,
}
print(tabulate(
[res.values()],
res.keys(),
floatfmt=".4f"
))
print(">>>> ALL")
_print_result(results)
if show_bin:
edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
for i in range(1, len(edges)):
mask = np.logical_and(results[:, 1] >= edges[i-1],
results[:, 1] < edges[i])
if not mask.any():
continue
bin_results = results[mask]
print(f">>>> ({edges[i-1]}, {edges[i]})")
_print_result(bin_results)
def main(eval_spec, mcd, msd, show_bin):
samples = load_eval_spec(eval_spec)
device = "cpu"
if mcd:
print("===== Evaluate Mean Cepstral Distortion =====")
results = eval_mel_cepstral_distortion(samples, device)
print_results(results, show_bin)
if msd:
print("===== Evaluate Mean Spectral Distortion =====")
results = eval_mel_spectral_distortion(samples, device)
print_results(results, show_bin)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("eval_spec")
parser.add_argument("--mcd", action="store_true")
parser.add_argument("--msd", action="store_true")
parser.add_argument("--show-bin", action="store_true")
args = parser.parse_args()
main(args.eval_spec, args.mcd, args.msd, args.show_bin)
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/evaluation/eval_sp.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import editdistance
import re
import shutil
import soundfile as sf
import subprocess
from pathlib import Path
from examples.speech_to_text.data_utils import load_tsv_to_dicts
def preprocess_text(text):
text = "|".join(re.sub(r"[^A-Z' ]", " ", text.upper()).split())
text = " ".join(text)
return text
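# Example (illustrative, not in the original file):
#   preprocess_text("Hello, world!") -> "H E L L O | W O R L D"
# i.e. uppercased letters separated by spaces, with "|" marking word boundaries, matching
# the letter-level ("ltr") labels used by run_asr below.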
def prepare_w2v_data(
dict_dir, sample_rate, label, audio_paths, texts, split, data_dir
):
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(
dict_dir / f"dict.{label}.txt",
data_dir / f"dict.{label}.txt"
)
with open(data_dir / f"{split}.tsv", "w") as f:
f.write("/\n")
for audio_path in audio_paths:
wav, sr = sf.read(audio_path)
            assert sr == sample_rate, f"{sr} != {sample_rate}"
nsample = len(wav)
f.write(f"{audio_path}\t{nsample}\n")
with open(data_dir / f"{split}.{label}", "w") as f:
for text in texts:
text = preprocess_text(text)
f.write(f"{text}\n")
def run_asr(asr_dir, split, w2v_ckpt, w2v_label, res_dir):
"""
results will be saved at
{res_dir}/{ref,hypo}.word-{w2v_ckpt.filename}-{split}.txt
"""
cmd = ["python", "-m", "examples.speech_recognition.infer"]
cmd += [str(asr_dir.resolve())]
cmd += ["--task", "audio_finetuning", "--nbest", "1", "--quiet"]
cmd += ["--w2l-decoder", "viterbi", "--criterion", "ctc"]
cmd += ["--post-process", "letter", "--max-tokens", "4000000"]
cmd += ["--path", str(w2v_ckpt.resolve()), "--labels", w2v_label]
cmd += ["--gen-subset", split, "--results-path", str(res_dir.resolve())]
print(f"running cmd:\n{' '.join(cmd)}")
subprocess.run(cmd, check=True)
def compute_error_rate(hyp_wrd_path, ref_wrd_path, unit="word"):
"""each line is "<text> (None-<index>)" """
tokenize_line = {
"word": lambda x: re.sub(r" \(.*\)$", "", x.rstrip()).split(),
"char": lambda x: list(re.sub(r" \(.*\)$", "", x.rstrip()))
}.get(unit)
if tokenize_line is None:
raise ValueError(f"{unit} not supported")
inds = [int(re.sub(r"\D*(\d*)\D*", r"\1", line))
for line in open(hyp_wrd_path)]
hyps = [tokenize_line(line) for line in open(hyp_wrd_path)]
refs = [tokenize_line(line) for line in open(ref_wrd_path)]
assert(len(hyps) == len(refs))
err_rates = [
editdistance.eval(hyp, ref) / len(ref) for hyp, ref in zip(hyps, refs)
]
ind_to_err_rates = {i: e for i, e in zip(inds, err_rates)}
return ind_to_err_rates
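# Example (illustrative, not in the original file): given hypothesis/reference files whose
# lines look like "A CAT SAT (None-0)", compute_error_rate returns a dict mapping each
# utterance index to its word (or character) error rate, e.g. {0: 0.0, 1: 0.25, ...}.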
def main(args):
samples = load_tsv_to_dicts(args.raw_manifest)
ids = [
sample[args.id_header] if args.id_header else "" for sample in samples
]
audio_paths = [sample[args.audio_header] for sample in samples]
texts = [sample[args.text_header] for sample in samples]
prepare_w2v_data(
args.w2v_dict_dir,
args.w2v_sample_rate,
args.w2v_label,
audio_paths,
texts,
args.split,
args.asr_dir
)
run_asr(args.asr_dir, args.split, args.w2v_ckpt, args.w2v_label, args.asr_dir)
ind_to_err_rates = compute_error_rate(
args.asr_dir / f"hypo.word-{args.w2v_ckpt.name}-{args.split}.txt",
args.asr_dir / f"ref.word-{args.w2v_ckpt.name}-{args.split}.txt",
args.err_unit,
)
uer_path = args.asr_dir / f"uer_{args.err_unit}.{args.split}.tsv"
with open(uer_path, "w") as f:
f.write("id\taudio\tuer\n")
for ind, (id_, audio_path) in enumerate(zip(ids, audio_paths)):
f.write(f"{id_}\t{audio_path}\t{ind_to_err_rates[ind]:.4f}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--raw-manifest", required=True, type=Path)
parser.add_argument("--asr-dir", required=True, type=Path)
parser.add_argument("--id-header", default="id", type=str)
parser.add_argument("--audio-header", default="audio", type=str)
parser.add_argument("--text-header", default="src_text", type=str)
parser.add_argument("--split", default="raw", type=str)
parser.add_argument("--w2v-ckpt", required=True, type=Path)
parser.add_argument("--w2v-dict-dir", required=True, type=Path)
parser.add_argument("--w2v-sample-rate", default=16000, type=int)
parser.add_argument("--w2v-label", default="ltr", type=str)
parser.add_argument("--err-unit", default="word", type=str)
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/evaluation/eval_asr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Signal processing-based evaluation using waveforms
"""
import numpy as np
import os.path as op
import torchaudio
import tqdm
from tabulate import tabulate
from examples.speech_synthesis.utils import (
gross_pitch_error, voicing_decision_error, f0_frame_error
)
from examples.speech_synthesis.evaluation.eval_sp import load_eval_spec
def difference_function(x, n, tau_max):
"""
Compute difference function of data x. This solution is implemented directly
with Numpy fft.
:param x: audio data
:param n: length of data
:param tau_max: integration window size
:return: difference function
:rtype: list
"""
x = np.array(x, np.float64)
w = x.size
tau_max = min(tau_max, w)
x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))
size = w + tau_max
p2 = (size // 32).bit_length()
nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size)
fc = np.fft.rfft(x, size_pad)
conv = np.fft.irfft(fc * fc.conjugate())[:tau_max]
return x_cumsum[w:w - tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - \
2 * conv
def cumulative_mean_normalized_difference_function(df, n):
"""
Compute cumulative mean normalized difference function (CMND).
:param df: Difference function
:param n: length of data
:return: cumulative mean normalized difference function
:rtype: list
"""
# scipy method
cmn_df = df[1:] * range(1, n) / np.cumsum(df[1:]).astype(float)
return np.insert(cmn_df, 0, 1)
def get_pitch(cmdf, tau_min, tau_max, harmo_th=0.1):
"""
Return fundamental period of a frame based on CMND function.
:param cmdf: Cumulative Mean Normalized Difference function
:param tau_min: minimum period for speech
:param tau_max: maximum period for speech
:param harmo_th: harmonicity threshold to determine if it is necessary to
compute pitch frequency
:return: fundamental period if there is values under threshold, 0 otherwise
:rtype: float
"""
tau = tau_min
while tau < tau_max:
if cmdf[tau] < harmo_th:
while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]:
tau += 1
return tau
tau += 1
return 0 # if unvoiced
def compute_yin(sig, sr, w_len=512, w_step=256, f0_min=100, f0_max=500,
harmo_thresh=0.1):
"""
Compute the Yin Algorithm. Return fundamental frequency and harmonic rate.
    https://github.com/NVIDIA/mellotron adaptation of
https://github.com/patriceguyot/Yin
:param sig: Audio signal (list of float)
:param sr: sampling rate (int)
:param w_len: size of the analysis window (samples)
    :param w_step: size of the lag between two consecutive windows (samples)
:param f0_min: Minimum fundamental frequency that can be detected (hertz)
:param f0_max: Maximum fundamental frequency that can be detected (hertz)
    :param harmo_thresh: Threshold of detection. The algorithm returns the
        first minimum of the CMND function below this threshold.
:returns:
* pitches: list of fundamental frequencies,
* harmonic_rates: list of harmonic rate values for each fundamental
frequency value (= confidence value)
    * argmins: minimums of the Cumulative Mean Normalized Difference Function
* times: list of time of each estimation
:rtype: tuple
"""
tau_min = int(sr / f0_max)
tau_max = int(sr / f0_min)
# time values for each analysis window
time_scale = range(0, len(sig) - w_len, w_step)
times = [t/float(sr) for t in time_scale]
frames = [sig[t:t + w_len] for t in time_scale]
pitches = [0.0] * len(time_scale)
harmonic_rates = [0.0] * len(time_scale)
argmins = [0.0] * len(time_scale)
for i, frame in enumerate(frames):
# Compute YIN
df = difference_function(frame, w_len, tau_max)
cm_df = cumulative_mean_normalized_difference_function(df, tau_max)
p = get_pitch(cm_df, tau_min, tau_max, harmo_thresh)
# Get results
if np.argmin(cm_df) > tau_min:
argmins[i] = float(sr / np.argmin(cm_df))
if p != 0: # A pitch was found
pitches[i] = float(sr / p)
harmonic_rates[i] = cm_df[p]
else: # No pitch, but we compute a value of the harmonic rate
harmonic_rates[i] = min(cm_df)
return pitches, harmonic_rates, argmins, times
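# Illustrative usage sketch (not part of the original file): YIN pitch tracking on one mono
# waveform loaded with torchaudio, matching how extract_f0 below calls compute_yin.
def _example_compute_yin(wav_path: str = "sample.wav"):
    waveform, sr = torchaudio.load(wav_path)
    pitches, harmonic_rates, argmins, times = compute_yin(waveform[0], sr)
    # pitches[i] is the F0 estimate in Hz (0 if unvoiced) for the window starting at times[i].
    return pitches, times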
def extract_f0(samples):
f0_samples = []
for sample in tqdm.tqdm(samples):
if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
f0_samples.append(None)
continue
# assume single channel
yref, sr = torchaudio.load(sample["ref"])
ysyn, _sr = torchaudio.load(sample["syn"])
yref, ysyn = yref[0], ysyn[0]
assert sr == _sr, f"{sr} != {_sr}"
yref_f0 = compute_yin(yref, sr)
ysyn_f0 = compute_yin(ysyn, sr)
f0_samples += [
{
"ref": yref_f0,
"syn": ysyn_f0
}
]
return f0_samples
def eval_f0_error(samples, distortion_fn):
results = []
for sample in tqdm.tqdm(samples):
if sample is None:
results.append(None)
continue
# assume single channel
yref_f, _, _, yref_t = sample["ref"]
ysyn_f, _, _, ysyn_t = sample["syn"]
yref_f = np.array(yref_f)
yref_t = np.array(yref_t)
ysyn_f = np.array(ysyn_f)
ysyn_t = np.array(ysyn_t)
distortion = distortion_fn(yref_t, yref_f, ysyn_t, ysyn_f)
results.append((distortion.item(),
len(yref_f),
len(ysyn_f)
))
return results
def eval_gross_pitch_error(samples):
return eval_f0_error(samples, gross_pitch_error)
def eval_voicing_decision_error(samples):
return eval_f0_error(samples, voicing_decision_error)
def eval_f0_frame_error(samples):
return eval_f0_error(samples, f0_frame_error)
def print_results(results, show_bin):
results = np.array(list(filter(lambda x: x is not None, results)))
np.set_printoptions(precision=3)
def _print_result(results):
res = {
"nutt": len(results),
"error": results[:, 0].mean(),
"std": results[:, 0].std(),
"dur_ref": int(results[:, 1].sum()),
"dur_syn": int(results[:, 2].sum()),
}
print(tabulate([res.values()], res.keys(), floatfmt=".4f"))
print(">>>> ALL")
_print_result(results)
if show_bin:
edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
for i in range(1, len(edges)):
mask = np.logical_and(results[:, 1] >= edges[i-1],
results[:, 1] < edges[i])
if not mask.any():
continue
bin_results = results[mask]
print(f">>>> ({edges[i-1]}, {edges[i]})")
_print_result(bin_results)
def main(eval_f0, gpe, vde, ffe, show_bin):
samples = load_eval_spec(eval_f0)
if gpe or vde or ffe:
f0_samples = extract_f0(samples)
if gpe:
print("===== Evaluate Gross Pitch Error =====")
results = eval_gross_pitch_error(f0_samples)
print_results(results, show_bin)
if vde:
print("===== Evaluate Voicing Decision Error =====")
results = eval_voicing_decision_error(f0_samples)
print_results(results, show_bin)
if ffe:
print("===== Evaluate F0 Frame Error =====")
results = eval_f0_frame_error(f0_samples)
print_results(results, show_bin)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("eval_f0")
parser.add_argument("--gpe", action="store_true")
parser.add_argument("--vde", action="store_true")
parser.add_argument("--ffe", action="store_true")
parser.add_argument("--show-bin", action="store_true")
args = parser.parse_args()
main(args.eval_f0, args.gpe, args.vde, args.ffe, args.show_bin)
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/evaluation/eval_f0.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from pathlib import Path
def main(args):
"""
    Generate an evaluation spec TSV with columns `id syn ref text speaker`.
"""
in_root = Path(args.generation_root).resolve()
ext = args.audio_format
with open(args.audio_manifest) as f, open(args.output_path, "w") as f_out:
reader = csv.DictReader(
f, delimiter="\t", quotechar=None, doublequote=False,
lineterminator="\n", quoting=csv.QUOTE_NONE
)
header = ["id", "syn", "ref", "text", "speaker"]
f_out.write("\t".join(header) + "\n")
for row in reader:
dir_name = f"{ext}_{args.sample_rate}hz_{args.vocoder}"
id_ = row["id"]
syn = (in_root / dir_name / f"{id_}.{ext}").as_posix()
ref = row["audio"]
if args.use_resynthesized_target:
ref = (in_root / f"{dir_name}_tgt" / f"{id_}.{ext}").as_posix()
if args.eval_target:
syn = row["audio"]
sample = [id_, syn, ref, row["tgt_text"], row["speaker"]]
f_out.write("\t".join(sample) + "\n")
print(f"wrote evaluation file to {args.output_path}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--generation-root", help="output directory for generate_waveform.py"
)
parser.add_argument(
"--audio-manifest",
help="used to determine the original utterance ID and text"
)
parser.add_argument(
"--output-path", help="path to output evaluation spec file"
)
parser.add_argument(
"--use-resynthesized-target", action="store_true",
help="use resynthesized reference instead of the original audio"
)
parser.add_argument(
"--eval-target", action="store_true",
help="evaluate reference instead of model prediction"
)
parser.add_argument("--vocoder", type=str, default="griffin_lim")
parser.add_argument("--sample-rate", type=int, default=22_050)
parser.add_argument("--audio-format", type=str, default="wav")
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/evaluation/get_eval_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/evaluation/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import numpy as np
import re
from pathlib import Path
from collections import defaultdict
import pandas as pd
from torchaudio.datasets import VCTK
from tqdm import tqdm
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def normalize_text(text):
return re.sub(r"[^a-zA-Z.?!,'\- ]", '', text)
def process(args):
out_root = Path(args.output_data_root).absolute()
out_root.mkdir(parents=True, exist_ok=True)
# Generate TSV manifest
print("Generating manifest...")
dataset = VCTK(out_root.as_posix(), download=False)
ids = list(dataset._walker)
np.random.seed(args.seed)
np.random.shuffle(ids)
n_train = len(ids) - args.n_dev - args.n_test
_split = ["train"] * n_train + ["dev"] * args.n_dev + ["test"] * args.n_test
id_to_split = dict(zip(ids, _split))
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
progress = tqdm(enumerate(dataset), total=len(dataset))
for i, (waveform, _, text, speaker_id, _) in progress:
sample_id = dataset._walker[i]
_split = id_to_split[sample_id]
audio_dir = Path(dataset._path) / dataset._folder_audio / speaker_id
audio_path = audio_dir / f"{sample_id}.wav"
text = normalize_text(text)
manifest_by_split[_split]["id"].append(sample_id)
manifest_by_split[_split]["audio"].append(audio_path.as_posix())
manifest_by_split[_split]["n_frames"].append(len(waveform[0]))
manifest_by_split[_split]["tgt_text"].append(text)
manifest_by_split[_split]["speaker"].append(speaker_id)
manifest_by_split[_split]["src_text"].append(text)
manifest_root = Path(args.output_manifest_root).absolute()
manifest_root.mkdir(parents=True, exist_ok=True)
for _split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[_split]),
manifest_root / f"{_split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
parser.add_argument("--n-dev", default=50, type=int)
parser.add_argument("--n-test", default=100, type=int)
parser.add_argument("--seed", "-s", default=1234, type=int)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/get_vctk_audio_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import defaultdict
from itertools import chain
from pathlib import Path
import numpy as np
import torchaudio
import torchaudio.sox_effects as ta_sox
import yaml
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_tsv_to_dicts
from examples.speech_synthesis.preprocessing.speaker_embedder import SpkrEmbedder
def extract_embedding(audio_path, embedder):
wav, sr = torchaudio.load(audio_path) # 2D
if sr != embedder.RATE:
wav, sr = ta_sox.apply_effects_tensor(
wav, sr, [["rate", str(embedder.RATE)]]
)
try:
emb = embedder([wav[0].cuda().float()]).cpu().numpy()
except RuntimeError:
emb = None
return emb
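# Illustrative usage sketch (not part of the original file): embedding one utterance. The
# checkpoint path is a placeholder; a CUDA device is assumed because extract_embedding moves
# the waveform to GPU.
#   embedder = SpkrEmbedder("speaker_embedder_ckpt.pt").cuda()
#   emb = extract_embedding("sample.wav", embedder)  # numpy embedding, or None on failure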
def process(args):
print("Fetching data...")
raw_manifest_root = Path(args.raw_manifest_root).absolute()
samples = [load_tsv_to_dicts(raw_manifest_root / (s + ".tsv"))
for s in args.splits]
samples = list(chain(*samples))
with open(args.config, "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
with open(f"{config['audio_root']}/{config['speaker_set_filename']}") as f:
speaker_to_id = {r.strip(): i for i, r in enumerate(f)}
embedder = SpkrEmbedder(args.ckpt).cuda()
speaker_to_cnt = defaultdict(float)
speaker_to_emb = defaultdict(float)
for sample in tqdm(samples, desc="extract emb"):
emb = extract_embedding(sample["audio"], embedder)
if emb is not None:
speaker_to_cnt[sample["speaker"]] += 1
speaker_to_emb[sample["speaker"]] += emb
if len(speaker_to_emb) != len(speaker_to_id):
missed = set(speaker_to_id) - set(speaker_to_emb.keys())
print(
f"WARNING: missing embeddings for {len(missed)} speaker:\n{missed}"
)
speaker_emb_mat = np.zeros((len(speaker_to_id), len(emb)), float)
for speaker in speaker_to_emb:
idx = speaker_to_id[speaker]
emb = speaker_to_emb[speaker]
cnt = speaker_to_cnt[speaker]
speaker_emb_mat[idx, :] = emb / cnt
speaker_emb_name = "speaker_emb.npy"
speaker_emb_path = f"{config['audio_root']}/{speaker_emb_name}"
np.save(speaker_emb_path, speaker_emb_mat)
config["speaker_emb_filename"] = speaker_emb_name
with open(args.new_config, "w") as f:
yaml.dump(config, f)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--raw-manifest-root", "-m", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train"])
parser.add_argument("--config", "-c", required=True, type=str)
parser.add_argument("--new-config", "-n", required=True, type=str)
parser.add_argument("--ckpt", required=True, type=str,
help="speaker embedder checkpoint")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/get_speaker_embedding.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
from torchaudio.datasets import LJSPEECH
from tqdm import tqdm
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def process(args):
out_root = Path(args.output_data_root).absolute()
out_root.mkdir(parents=True, exist_ok=True)
# Generate TSV manifest
print("Generating manifest...")
# following FastSpeech's splits
dataset = LJSPEECH(out_root.as_posix(), download=True)
id_to_split = {}
for x in dataset._flist:
id_ = x[0]
speaker = id_.split("-")[0]
id_to_split[id_] = {
"LJ001": "test", "LJ002": "test", "LJ003": "dev"
}.get(speaker, "train")
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
progress = tqdm(enumerate(dataset), total=len(dataset))
for i, (waveform, _, utt, normalized_utt) in progress:
sample_id = dataset._flist[i][0]
split = id_to_split[sample_id]
manifest_by_split[split]["id"].append(sample_id)
audio_path = f"{dataset._path}/{sample_id}.wav"
manifest_by_split[split]["audio"].append(audio_path)
manifest_by_split[split]["n_frames"].append(len(waveform[0]))
manifest_by_split[split]["tgt_text"].append(normalized_utt)
manifest_by_split[split]["speaker"].append("ljspeech")
manifest_by_split[split]["src_text"].append(utt)
manifest_root = Path(args.output_manifest_root).absolute()
manifest_root.mkdir(parents=True, exist_ok=True)
for split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
manifest_root / f"{split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/get_ljspeech_audio_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from collections import Counter, defaultdict
import pandas as pd
import torchaudio
from tqdm import tqdm
from fairseq.data.audio.audio_utils import convert_waveform
from examples.speech_to_text.data_utils import (
create_zip,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_tsv_to_dicts,
save_df_to_tsv
)
from examples.speech_synthesis.data_utils import (
extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn,
ipa_phonemize, get_mfa_alignment, get_unit_alignment,
get_feature_value_min_max
)
log = logging.getLogger(__name__)
def process(args):
assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
samples = []
for s in args.splits:
for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"):
e["split"] = s
samples.append(e)
sample_ids = [s["id"] for s in samples]
# Get alignment info
id_to_alignment = None
if args.textgrid_zip is not None:
assert args.id_to_units_tsv is None
id_to_alignment = get_mfa_alignment(
args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length
)
elif args.id_to_units_tsv is not None:
# assume identical hop length on the unit sequence
id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids)
# Extract features and pack features into ZIP
feature_name = "logmelspec80"
zip_path = out_root / f"{feature_name}.zip"
pitch_zip_path = out_root / "pitch.zip"
energy_zip_path = out_root / "energy.zip"
gcmvn_npz_path = out_root / "gcmvn_stats.npz"
if zip_path.exists() and gcmvn_npz_path.exists():
print(f"{zip_path} and {gcmvn_npz_path} exist.")
else:
feature_root = out_root / feature_name
feature_root.mkdir(exist_ok=True)
pitch_root = out_root / "pitch"
energy_root = out_root / "energy"
if args.add_fastspeech_targets:
pitch_root.mkdir(exist_ok=True)
energy_root.mkdir(exist_ok=True)
print("Extracting Mel spectrogram features...")
for sample in tqdm(samples):
waveform, sample_rate = torchaudio.load(sample["audio"])
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=args.normalize_volume,
to_sample_rate=args.sample_rate
)
sample_id = sample["id"]
target_length = None
if id_to_alignment is not None:
a = id_to_alignment[sample_id]
target_length = sum(a.frame_durations)
if a.start_sec is not None and a.end_sec is not None:
start_frame = int(a.start_sec * sample_rate)
end_frame = int(a.end_sec * sample_rate)
waveform = waveform[:, start_frame: end_frame]
extract_logmel_spectrogram(
waveform, sample_rate, feature_root / f"{sample_id}.npy",
win_length=args.win_length, hop_length=args.hop_length,
n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min,
f_max=args.f_max, target_length=target_length
)
if args.add_fastspeech_targets:
assert id_to_alignment is not None
extract_pitch(
waveform, sample_rate, pitch_root / f"{sample_id}.npy",
hop_length=args.hop_length, log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
extract_energy(
waveform, energy_root / f"{sample_id}.npy",
hop_length=args.hop_length, n_fft=args.n_fft,
log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
print("ZIPing features...")
create_zip(feature_root, zip_path)
get_global_cmvn(feature_root, gcmvn_npz_path)
shutil.rmtree(feature_root)
if args.add_fastspeech_targets:
create_zip(pitch_root, pitch_zip_path)
shutil.rmtree(pitch_root)
create_zip(energy_root, energy_zip_path)
shutil.rmtree(energy_root)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4
if args.add_fastspeech_targets:
pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path)
energy_paths, energy_lengths = get_zip_manifest(energy_zip_path)
# Generate TSV manifest
print("Generating manifest...")
id_to_cer = None
if args.cer_threshold is not None:
assert Path(args.cer_tsv_path).is_file()
id_to_cer = {
x["id"]: x["uer"] for x in load_tsv_to_dicts(args.cer_tsv_path)
}
manifest_by_split = {split: defaultdict(list) for split in args.splits}
for sample in tqdm(samples):
sample_id, split = sample["id"], sample["split"]
if args.snr_threshold is not None and "snr" in sample \
and sample["snr"] < args.snr_threshold:
continue
if args.cer_threshold is not None \
                and id_to_cer[sample_id] > args.cer_threshold:
continue
normalized_utt = sample["tgt_text"]
if id_to_alignment is not None:
normalized_utt = " ".join(id_to_alignment[sample_id].tokens)
elif args.ipa_vocab:
normalized_utt = ipa_phonemize(
normalized_utt, lang=args.lang, use_g2p=args.use_g2p
)
manifest_by_split[split]["id"].append(sample_id)
manifest_by_split[split]["audio"].append(audio_paths[sample_id])
manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id])
manifest_by_split[split]["tgt_text"].append(normalized_utt)
manifest_by_split[split]["speaker"].append(sample["speaker"])
manifest_by_split[split]["src_text"].append(sample["src_text"])
if args.add_fastspeech_targets:
assert id_to_alignment is not None
duration = " ".join(
str(d) for d in id_to_alignment[sample_id].frame_durations
)
manifest_by_split[split]["duration"].append(duration)
manifest_by_split[split]["pitch"].append(pitch_paths[sample_id])
manifest_by_split[split]["energy"].append(energy_paths[sample_id])
for split in args.splits:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
out_root / f"{split}.tsv"
)
# Generate vocab
vocab_name, spm_filename = None, None
if id_to_alignment is not None or args.ipa_vocab:
vocab = Counter()
for t in manifest_by_split["train"]["tgt_text"]:
vocab.update(t.split(" "))
vocab_name = "vocab.txt"
with open(out_root / vocab_name, "w") as f:
for s, c in vocab.most_common():
f.write(f"{s} {c}\n")
else:
spm_filename_prefix = "spm_char"
spm_filename = f"{spm_filename_prefix}.model"
with NamedTemporaryFile(mode="w") as f:
for t in manifest_by_split["train"]["tgt_text"]:
f.write(t + "\n")
f.flush() # needed to ensure gen_vocab sees dumped text
gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char")
# Generate speaker list
speakers = sorted({sample["speaker"] for sample in samples})
speakers_path = out_root / "speakers.txt"
with open(speakers_path, "w") as f:
for speaker in speakers:
f.write(f"{speaker}\n")
# Generate config YAML
win_len_t = args.win_length / args.sample_rate
hop_len_t = args.hop_length / args.sample_rate
extra = {
"sample_rate": args.sample_rate,
"features": {
"type": "spectrogram+melscale+log",
"eps": 1e-5, "n_mels": args.n_mels, "n_fft": args.n_fft,
"window_fn": "hann", "win_length": args.win_length,
"hop_length": args.hop_length, "sample_rate": args.sample_rate,
"win_len_t": win_len_t, "hop_len_t": hop_len_t,
"f_min": args.f_min, "f_max": args.f_max,
"n_stft": args.n_fft // 2 + 1
}
}
if len(speakers) > 1:
extra["speaker_set_filename"] = "speakers.txt"
if args.add_fastspeech_targets:
pitch_min, pitch_max = get_feature_value_min_max(
[(out_root / n).as_posix() for n in pitch_paths.values()]
)
energy_min, energy_max = get_feature_value_min_max(
[(out_root / n).as_posix() for n in energy_paths.values()]
)
extra["features"]["pitch_min"] = pitch_min
extra["features"]["pitch_max"] = pitch_max
extra["features"]["energy_min"] = energy_min
extra["features"]["energy_max"] = energy_max
gen_config_yaml(
out_root, spm_filename=spm_filename, vocab_name=vocab_name,
audio_root=out_root.as_posix(), input_channels=None,
input_feat_per_channel=None, specaugment_policy=None,
cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest-root", "-m", required=True, type=str)
parser.add_argument("--output-root", "-o", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train", "dev", "test"])
parser.add_argument("--ipa-vocab", action="store_true")
parser.add_argument("--use-g2p", action="store_true")
parser.add_argument("--lang", type=str, default="en-us")
parser.add_argument("--win-length", type=int, default=1024)
parser.add_argument("--hop-length", type=int, default=256)
parser.add_argument("--n-fft", type=int, default=1024)
parser.add_argument("--n-mels", type=int, default=80)
parser.add_argument("--f-min", type=int, default=20)
parser.add_argument("--f-max", type=int, default=8000)
parser.add_argument("--sample-rate", type=int, default=22050)
parser.add_argument("--normalize-volume", "-n", action="store_true")
parser.add_argument("--textgrid-zip", type=str, default=None)
parser.add_argument("--id-to-units-tsv", type=str, default=None)
parser.add_argument("--add-fastspeech-targets", action="store_true")
parser.add_argument("--snr-threshold", type=float, default=None)
parser.add_argument("--cer-threshold", type=float, default=None)
parser.add_argument("--cer-tsv-path", type=str, default="")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/get_feature_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
from collections import defaultdict
from typing import List, Dict, Tuple
import pandas as pd
import numpy as np
import torchaudio
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def get_top_n(
root: Path, n_speakers: int = 10, min_n_tokens: int = 5
) -> pd.DataFrame:
df = load_df_from_tsv(root / "validated.tsv")
df["n_tokens"] = [len(s.split()) for s in df["sentence"]]
df = df[df["n_tokens"] >= min_n_tokens]
df["n_frames"] = [
torchaudio.info((root / "clips" / p).as_posix()).num_frames
for p in tqdm(df["path"])
]
df["id"] = [Path(p).stem for p in df["path"]]
total_duration_ms = df.groupby("client_id")["n_frames"].agg(["sum"])
total_duration_ms = total_duration_ms.sort_values("sum", ascending=False)
top_n_total_duration_ms = total_duration_ms.head(n_speakers)
top_n_client_ids = set(top_n_total_duration_ms.index.tolist())
df_top_n = df[df["client_id"].isin(top_n_client_ids)]
return df_top_n
def get_splits(
df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0
) -> Tuple[Dict[str, str], List[str]]:
np.random.seed(rand_seed)
dev_split_ratio = (1. - train_split_ratio) / 3
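# With the default train_split_ratio=0.99, each speaker contributes roughly
# 0.33% of utterances to dev and the remaining ~0.67% to test (test receives
# whatever is left after the train and dev counts are taken below).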
grouped = list(df.groupby("client_id"))
id_to_split = {}
for _, cur_df in tqdm(grouped):
cur_n_examples = len(cur_df)
if speaker_in_all_splits and cur_n_examples < 3:
continue
cur_n_train = int(cur_n_examples * train_split_ratio)
cur_n_dev = int(cur_n_examples * dev_split_ratio)
cur_n_test = cur_n_examples - cur_n_dev - cur_n_train
if speaker_in_all_splits and cur_n_dev * cur_n_test == 0:
cur_n_dev, cur_n_test = 1, 1
cur_n_train = cur_n_examples - cur_n_dev - cur_n_test
cur_indices = cur_df.index.tolist()
cur_shuffled_indices = np.random.permutation(cur_n_examples)
cur_shuffled_indices = [cur_indices[i] for i in cur_shuffled_indices]
cur_indices_by_split = {
"train": cur_shuffled_indices[:cur_n_train],
"dev": cur_shuffled_indices[cur_n_train: cur_n_train + cur_n_dev],
"test": cur_shuffled_indices[cur_n_train + cur_n_dev:]
}
for split in SPLITS:
for i in cur_indices_by_split[split]:
id_ = df["id"].loc[i]
id_to_split[id_] = split
return id_to_split, sorted(df["client_id"].unique())
def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000):
out_root = root / "wav"
out_root.mkdir(exist_ok=True, parents=True)
print("Converting to WAV...")
for n in tqdm(filenames):
in_path = (root / "clips" / n).as_posix()
waveform, sr = torchaudio.load(in_path)
converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor(
waveform, sr, [["rate", str(target_sr)], ["channels", "1"]]
)
out_path = (out_root / Path(n).with_suffix(".wav").name).as_posix()
torchaudio.save(out_path, converted, converted_sr, encoding="PCM_S",
bits_per_sample=16)
def process(args):
data_root = Path(args.data_root).absolute() / args.lang
# Generate TSV manifest
print("Generating manifest...")
df_top_n = get_top_n(data_root)
id_to_split, speakers = get_splits(df_top_n)
if args.convert_to_wav:
convert_to_wav(data_root, df_top_n["path"].tolist())
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
for sample in tqdm(df_top_n.to_dict(orient="index").values()):
sample_id = sample["id"]
split = id_to_split[sample_id]
manifest_by_split[split]["id"].append(sample_id)
if args.convert_to_wav:
audio_path = data_root / "wav" / f"{sample_id}.wav"
else:
audio_path = data_root / "clips" / f"{sample_id}.mp3"
manifest_by_split[split]["audio"].append(audio_path.as_posix())
manifest_by_split[split]["n_frames"].append(sample["n_frames"])
manifest_by_split[split]["tgt_text"].append(sample["sentence"])
manifest_by_split[split]["speaker"].append(sample["client_id"])
manifest_by_split[split]["src_text"].append(sample["sentence"])
output_root = Path(args.output_manifest_root).absolute()
output_root.mkdir(parents=True, exist_ok=True)
for split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
output_root / f"{split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
parser.add_argument("--lang", "-l", required=True, type=str)
parser.add_argument("--convert-to-wav", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import csv
import tempfile
from collections import defaultdict
from pathlib import Path
import torchaudio
try:
import webrtcvad
except ImportError:
raise ImportError("Please install py-webrtcvad: pip install webrtcvad")
import pandas as pd
from tqdm import tqdm
from examples.speech_synthesis.preprocessing.denoiser.pretrained import master64
import examples.speech_synthesis.preprocessing.denoiser.utils as utils
from examples.speech_synthesis.preprocessing.vad import (
frame_generator, vad_collector, read_wave, write_wave, FS_MS, THRESHOLD,
SCALE
)
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
PATHS = ["after_denoise", "after_vad"]
MIN_T = 0.05
def generate_tmp_filename(extension="txt"):
return tempfile._get_default_tempdir() + "/" + \
next(tempfile._get_candidate_names()) + "." + extension
def convert_sr(inpath, sr, output_path=None):
if not output_path:
output_path = generate_tmp_filename("wav")
cmd = f"sox {inpath} -r {sr} {output_path}"
os.system(cmd)
return output_path
def apply_vad(vad, inpath):
audio, sample_rate = read_wave(inpath)
frames = frame_generator(FS_MS, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
merge_segments = list()
timestamp_start = 0.0
timestamp_end = 0.0
# removing start, end, and long sequences of sils
for i, segment in enumerate(segments):
merge_segments.append(segment[0])
if i and timestamp_start:
sil_duration = segment[1] - timestamp_end
if sil_duration > THRESHOLD:
merge_segments.append(int(THRESHOLD / SCALE) * (b'\x00'))
else:
merge_segments.append(int((sil_duration / SCALE)) * (b'\x00'))
timestamp_start = segment[1]
timestamp_end = segment[2]
segment = b''.join(merge_segments)
return segment, sample_rate
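# Illustrative usage of apply_vad (a sketch, not part of the original script;
# assumes "utt.wav" is a 16 kHz mono 16-bit PCM file):
#
#   vad = webrtcvad.Vad(2)                  # aggressiveness 0-3
#   segment, sr = apply_vad(vad, "utt.wav")
#   write_wave("utt_vad.wav", segment, sr)  # voiced audio with capped silences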
def write(wav, filename, sr=16_000):
# Normalize audio to prevent clipping
wav = wav / max(wav.abs().max().item(), 1)
torchaudio.save(filename, wav.cpu(), sr, encoding="PCM_S",
bits_per_sample=16)
def process(args):
# making sure we are requested either denoise or vad
if not args.denoise and not args.vad:
log.error("No denoise or vad is requested.")
return
log.info("Creating out directories...")
if args.denoise:
out_denoise = Path(args.output_dir).absolute().joinpath(PATHS[0])
out_denoise.mkdir(parents=True, exist_ok=True)
if args.vad:
out_vad = Path(args.output_dir).absolute().joinpath(PATHS[1])
out_vad.mkdir(parents=True, exist_ok=True)
log.info("Loading pre-trained speech enhancement model...")
model = master64().to(args.device)
log.info("Building the VAD model...")
vad = webrtcvad.Vad(int(args.vad_agg_level))
# preparing the output dict
output_dict = defaultdict(list)
log.info(f"Parsing input manifest: {args.audio_manifest}")
with open(args.audio_manifest, "r") as f:
manifest_dict = csv.DictReader(f, delimiter="\t")
for row in tqdm(manifest_dict):
filename = str(row["audio"])
final_output = filename
keep_sample = True
n_frames = row["n_frames"]
snr = -1
if args.denoise:
output_path_denoise = out_denoise.joinpath(Path(filename).name)
# convert to 16kHz in case the input uses a different sample rate
tmp_path = convert_sr(final_output, 16000)
# loading audio file and generating the enhanced version
out, sr = torchaudio.load(tmp_path)
out = out.to(args.device)
estimate = model(out)
estimate = (1 - args.dry_wet) * estimate + args.dry_wet * out
write(estimate[0], str(output_path_denoise), sr)
snr = utils.cal_snr(out, estimate)
snr = snr.cpu().detach().numpy()[0][0]
final_output = str(output_path_denoise)
if args.vad:
output_path_vad = out_vad.joinpath(Path(filename).name)
sr = torchaudio.info(final_output).sample_rate
if sr in [16000, 32000, 48000]:
tmp_path = final_output
elif sr < 16000:
tmp_path = convert_sr(final_output, 16000)
elif sr < 32000:
tmp_path = convert_sr(final_output, 32000)
else:
tmp_path = convert_sr(final_output, 48000)
# apply VAD
segment, sample_rate = apply_vad(vad, tmp_path)
if len(segment) < sample_rate * MIN_T:
keep_sample = False
print((
f"WARNING: skip {filename} because it is too short "
f"after VAD ({len(segment) / sample_rate} < {MIN_T})"
))
else:
if sample_rate != sr:
tmp_path = generate_tmp_filename("wav")
write_wave(tmp_path, segment, sample_rate)
convert_sr(tmp_path, sr,
output_path=str(output_path_vad))
else:
write_wave(str(output_path_vad), segment, sample_rate)
final_output = str(output_path_vad)
segment, _ = torchaudio.load(final_output)
n_frames = segment.size(1)
if keep_sample:
output_dict["id"].append(row["id"])
output_dict["audio"].append(final_output)
output_dict["n_frames"].append(n_frames)
output_dict["tgt_text"].append(row["tgt_text"])
output_dict["speaker"].append(row["speaker"])
output_dict["src_text"].append(row["src_text"])
output_dict["snr"].append(snr)
out_tsv_path = Path(args.output_dir) / Path(args.audio_manifest).name
log.info(f"Saving manifest to {out_tsv_path.as_posix()}")
save_df_to_tsv(pd.DataFrame.from_dict(output_dict), out_tsv_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest", "-i", required=True,
type=str, help="path to the input manifest.")
parser.add_argument(
"--output-dir", "-o", required=True, type=str,
help="path to the output dir. it will contain files after denoising and"
" vad"
)
parser.add_argument("--vad-agg-level", "-a", type=int, default=2,
help="the aggresive level of the vad [0-3].")
parser.add_argument(
"--dry-wet", "-dw", type=float, default=0.01,
help="the level of linear interpolation between noisy and enhanced "
"files."
)
parser.add_argument(
"--device", "-d", type=str, default="cpu",
help="the device to be used for the speech enhancement model: "
"cpu | cuda."
)
parser.add_argument("--denoise", action="store_true",
help="apply a denoising")
parser.add_argument("--vad", action="store_true", help="apply a VAD")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/denoise_and_vad_audio.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import librosa
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torchaudio
EMBEDDER_PARAMS = {
'num_mels': 40,
'n_fft': 512,
'emb_dim': 256,
'lstm_hidden': 768,
'lstm_layers': 3,
'window': 80,
'stride': 40,
}
def set_requires_grad(nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary
computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
class LinearNorm(nn.Module):
def __init__(self, hp):
super(LinearNorm, self).__init__()
self.linear_layer = nn.Linear(hp["lstm_hidden"], hp["emb_dim"])
def forward(self, x):
return self.linear_layer(x)
class SpeechEmbedder(nn.Module):
def __init__(self, hp):
super(SpeechEmbedder, self).__init__()
self.lstm = nn.LSTM(hp["num_mels"],
hp["lstm_hidden"],
num_layers=hp["lstm_layers"],
batch_first=True)
self.proj = LinearNorm(hp)
self.hp = hp
def forward(self, mel):
# (num_mels, T) -> (num_mels, T', window)
mels = mel.unfold(1, self.hp["window"], self.hp["stride"])
mels = mels.permute(1, 2, 0) # (T', window, num_mels)
x, _ = self.lstm(mels) # (T', window, lstm_hidden)
x = x[:, -1, :] # (T', lstm_hidden), use last frame only
x = self.proj(x) # (T', emb_dim)
x = x / torch.norm(x, p=2, dim=1, keepdim=True) # (T', emb_dim)
x = x.mean(dim=0)
if x.norm(p=2) != 0:
x = x / x.norm(p=2)
return x
class SpkrEmbedder(nn.Module):
RATE = 16000
def __init__(
self,
embedder_path,
embedder_params=EMBEDDER_PARAMS,
rate=16000,
hop_length=160,
win_length=400,
pad=False,
):
super(SpkrEmbedder, self).__init__()
embedder_pt = torch.load(embedder_path, map_location="cpu")
self.embedder = SpeechEmbedder(embedder_params)
self.embedder.load_state_dict(embedder_pt)
self.embedder.eval()
set_requires_grad(self.embedder, requires_grad=False)
self.embedder_params = embedder_params
self.register_buffer('mel_basis', torch.from_numpy(
librosa.filters.mel(
sr=self.RATE,
n_fft=self.embedder_params["n_fft"],
n_mels=self.embedder_params["num_mels"])
)
)
self.resample = None
if rate != self.RATE:
self.resample = torchaudio.transforms.Resample(rate, self.RATE)
self.hop_length = hop_length
self.win_length = win_length
self.pad = pad
def get_mel(self, y):
if self.pad and y.shape[-1] < 14000:
y = F.pad(y, (0, 14000 - y.shape[-1]))
window = torch.hann_window(self.win_length).to(y)
y = torch.stft(y, n_fft=self.embedder_params["n_fft"],
hop_length=self.hop_length,
win_length=self.win_length,
window=window)
magnitudes = torch.norm(y, dim=-1, p=2) ** 2
mel = torch.log10(self.mel_basis @ magnitudes + 1e-6)
return mel
def forward(self, inputs):
dvecs = []
for wav in inputs:
mel = self.get_mel(wav)
if mel.dim() == 3:
mel = mel.squeeze(0)
dvecs += [self.embedder(mel)]
dvecs = torch.stack(dvecs)
dvec = torch.mean(dvecs, dim=0)
dvec = dvec / torch.norm(dvec)
return dvec
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/speaker_embedder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import logging
import torch.hub
from .demucs import Demucs
from .utils import deserialize_model
logger = logging.getLogger(__name__)
ROOT = "https://dl.fbaipublicfiles.com/adiyoss/denoiser/"
DNS_48_URL = ROOT + "dns48-11decc9d8e3f0998.th"
DNS_64_URL = ROOT + "dns64-a7761ff99a7d5bb6.th"
MASTER_64_URL = ROOT + "master64-8a5dfb4bb92753dd.th"
def _demucs(pretrained, url, **kwargs):
model = Demucs(**kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu')
model.load_state_dict(state_dict)
return model
def dns48(pretrained=True):
return _demucs(pretrained, DNS_48_URL, hidden=48)
def dns64(pretrained=True):
return _demucs(pretrained, DNS_64_URL, hidden=64)
def master64(pretrained=True):
return _demucs(pretrained, MASTER_64_URL, hidden=64)
def add_model_flags(parser):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-m", "--model_path", help="Path to local trained model."
)
group.add_argument(
"--dns48", action="store_true",
help="Use pre-trained real time H=48 model trained on DNS."
)
group.add_argument(
"--dns64", action="store_true",
help="Use pre-trained real time H=64 model trained on DNS."
)
group.add_argument(
"--master64", action="store_true",
help="Use pre-trained real time H=64 model trained on DNS and Valentini."
)
def get_model(args):
"""
Load local model package or torchhub pre-trained model.
"""
if args.model_path:
logger.info("Loading model from %s", args.model_path)
pkg = torch.load(args.model_path)
model = deserialize_model(pkg)
elif args.dns64:
logger.info("Loading pre-trained real time H=64 model trained on DNS.")
model = dns64()
elif args.master64:
logger.info(
"Loading pre-trained real time H=64 model trained on DNS and Valentini."
)
model = master64()
else:
logger.info("Loading pre-trained real time H=48 model trained on DNS.")
model = dns48()
logger.debug(model)
return model
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/denoiser/pretrained.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import torch as th
from torch.nn import functional as F
def sinc(t):
"""sinc.
:param t: the input tensor
"""
return th.where(t == 0, th.tensor(1., device=t.device, dtype=t.dtype),
th.sin(t) / t)
def kernel_upsample2(zeros=56):
"""kernel_upsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t *= math.pi
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def upsample2(x, zeros=56):
"""
Upsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
*other, time = x.shape
kernel = kernel_upsample2(zeros).to(x)
out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(
*other, time
)
y = th.stack([x, out], dim=-1)
return y.view(*other, -1)
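# Minimal usage sketch (not part of the original module): upsample2 doubles the
# last (time) dimension via sinc interpolation, and downsample2 below halves it.
#
#   x = th.randn(1, 1, 1000)
#   y = upsample2(x)     # y.shape == (1, 1, 2000)
#   z = downsample2(y)   # z.shape == (1, 1, 1000), approximately recovers x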
def kernel_downsample2(zeros=56):
"""kernel_downsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t.mul_(math.pi)
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def downsample2(x, zeros=56):
"""
Downsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
if x.shape[-1] % 2 != 0:
x = F.pad(x, (0, 1))
xeven = x[..., ::2]
xodd = x[..., 1::2]
*other, time = xodd.shape
kernel = kernel_downsample2(zeros).to(x)
out = xeven + F.conv1d(
xodd.view(-1, 1, time), kernel, padding=zeros
)[..., :-1].view(*other, time)
return out.view(*other, -1).mul(0.5)
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/denoiser/resample.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/denoiser/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import functools
import logging
from contextlib import contextmanager
import inspect
import time
logger = logging.getLogger(__name__)
EPS = 1e-8
def capture_init(init):
"""capture_init.
Decorate `__init__` with this, and you can then
recover the *args and **kwargs passed to it in `self._init_args_kwargs`
"""
@functools.wraps(init)
def __init__(self, *args, **kwargs):
self._init_args_kwargs = (args, kwargs)
init(self, *args, **kwargs)
return __init__
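# Illustrative example (not from the original file): decorating __init__ with
# capture_init is what makes serialize_model below able to re-create the instance.
#
#   class Toy:
#       @capture_init
#       def __init__(self, a, b=2):
#           self.a, self.b = a, b
#
#   t = Toy(1, b=3)
#   t._init_args_kwargs   # ((1,), {'b': 3})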
def deserialize_model(package, strict=False):
"""deserialize_model.
"""
klass = package['class']
if strict:
model = klass(*package['args'], **package['kwargs'])
else:
sig = inspect.signature(klass)
kw = package['kwargs']
for key in list(kw):
if key not in sig.parameters:
logger.warning("Dropping inexistant parameter %s", key)
del kw[key]
model = klass(*package['args'], **kw)
model.load_state_dict(package['state'])
return model
def copy_state(state):
return {k: v.cpu().clone() for k, v in state.items()}
def serialize_model(model):
args, kwargs = model._init_args_kwargs
state = copy_state(model.state_dict())
return {"class": model.__class__, "args": args, "kwargs": kwargs, "state": state}
@contextmanager
def swap_state(model, state):
"""
Context manager that swaps the state of a model, e.g:
# model is in old state
with swap_state(model, new_state):
# model in new state
# model back to old state
"""
old_state = copy_state(model.state_dict())
model.load_state_dict(state)
try:
yield
finally:
model.load_state_dict(old_state)
def pull_metric(history, name):
out = []
for metrics in history:
if name in metrics:
out.append(metrics[name])
return out
class LogProgress:
"""
Sort of like tqdm but using log lines and not as real time.
Args:
- logger: logger obtained from `logging.getLogger`,
- iterable: iterable object to wrap
- updates (int): number of lines that will be printed, e.g.
if `updates=5`, log every 1/5th of the total length.
- total (int): length of the iterable, in case it does not support
`len`.
- name (str): prefix to use in the log.
- level: logging level (like `logging.INFO`).
"""
def __init__(self,
logger,
iterable,
updates=5,
total=None,
name="LogProgress",
level=logging.INFO):
self.iterable = iterable
self.total = total or len(iterable)
self.updates = updates
self.name = name
self.logger = logger
self.level = level
def update(self, **infos):
self._infos = infos
def __iter__(self):
self._iterator = iter(self.iterable)
self._index = -1
self._infos = {}
self._begin = time.time()
return self
def __next__(self):
self._index += 1
try:
value = next(self._iterator)
except StopIteration:
raise
else:
return value
finally:
log_every = max(1, self.total // self.updates)
# logging is delayed by 1 it, in order to have the metrics from update
if self._index >= 1 and self._index % log_every == 0:
self._log()
def _log(self):
self._speed = (1 + self._index) / (time.time() - self._begin)
infos = " | ".join(f"{k.capitalize()} {v}" for k, v in self._infos.items())
if self._speed < 1e-4:
speed = "oo sec/it"
elif self._speed < 0.1:
speed = f"{1/self._speed:.1f} sec/it"
else:
speed = f"{self._speed:.1f} it/sec"
out = f"{self.name} | {self._index}/{self.total} | {speed}"
if infos:
out += " | " + infos
self.logger.log(self.level, out)
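# Typical usage sketch (illustrative, not part of the original file):
#
#   loader = range(1000)
#   logprog = LogProgress(logger, loader, updates=10, name="train")
#   for batch in logprog:
#       loss = 0.0  # ... training step ...
#       logprog.update(loss=f"{loss:.4f}")
#   # prints roughly 10 log lines, each with the current speed and latest metrics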
def colorize(text, color):
"""
Display text with some ANSI color in the terminal.
"""
code = f"\033[{color}m"
restore = "\033[0m"
return "".join([code, text, restore])
def bold(text):
"""
Display text in bold in the terminal.
"""
return colorize(text, "1")
def cal_snr(lbl, est):
import torch
y = 10.0 * torch.log10(
torch.sum(lbl**2, dim=-1) / (torch.sum((est-lbl)**2, dim=-1) + EPS) +
EPS
)
return y
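# cal_snr computes a per-example SNR in dB:
#   SNR = 10 * log10( sum(lbl^2) / sum((est - lbl)^2) )
# e.g. if the residual energy is 1/100th of the clean-signal energy, the result
# is 10 * log10(100) = 20 dB.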
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/denoiser/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import time
import torch as th
from torch import nn
from torch.nn import functional as F
from .resample import downsample2, upsample2
from .utils import capture_init
class BLSTM(nn.Module):
def __init__(self, dim, layers=2, bi=True):
super().__init__()
klass = nn.LSTM
self.lstm = klass(
bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim
)
self.linear = None
if bi:
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x, hidden=None):
x, hidden = self.lstm(x, hidden)
if self.linear:
x = self.linear(x)
return x, hidden
def rescale_conv(conv, reference):
std = conv.weight.std().detach()
scale = (std / reference)**0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale
def rescale_module(module, reference):
for sub in module.modules():
if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
rescale_conv(sub, reference)
class Demucs(nn.Module):
"""
Demucs speech enhancement model.
Args:
- chin (int): number of input channels.
- chout (int): number of output channels.
- hidden (int): number of initial hidden channels.
- depth (int): number of layers.
- kernel_size (int): kernel size for each layer.
- stride (int): stride for each layer.
- causal (bool): if false, uses BiLSTM instead of LSTM.
- resample (int): amount of resampling to apply to the input/output.
Can be one of 1, 2 or 4.
- growth (float): number of channels is multiplied by this for every layer.
- max_hidden (int): maximum number of channels. Can be useful to
control the size/speed of the model.
- normalize (bool): if true, normalize the input.
- glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions.
- rescale (float): controls custom weight initialization.
See https://arxiv.org/abs/1911.13254.
- floor (float): stability flooring when normalizing.
"""
@capture_init
def __init__(self,
chin=1,
chout=1,
hidden=48,
depth=5,
kernel_size=8,
stride=4,
causal=True,
resample=4,
growth=2,
max_hidden=10_000,
normalize=True,
glu=True,
rescale=0.1,
floor=1e-3):
super().__init__()
if resample not in [1, 2, 4]:
raise ValueError("Resample should be 1, 2 or 4.")
self.chin = chin
self.chout = chout
self.hidden = hidden
self.depth = depth
self.kernel_size = kernel_size
self.stride = stride
self.causal = causal
self.floor = floor
self.resample = resample
self.normalize = normalize
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
activation = nn.GLU(1) if glu else nn.ReLU()
ch_scale = 2 if glu else 1
for index in range(depth):
encode = []
encode += [
nn.Conv1d(chin, hidden, kernel_size, stride),
nn.ReLU(),
nn.Conv1d(hidden, hidden * ch_scale, 1), activation,
]
self.encoder.append(nn.Sequential(*encode))
decode = []
decode += [
nn.Conv1d(hidden, ch_scale * hidden, 1), activation,
nn.ConvTranspose1d(hidden, chout, kernel_size, stride),
]
if index > 0:
decode.append(nn.ReLU())
self.decoder.insert(0, nn.Sequential(*decode))
chout = hidden
chin = hidden
hidden = min(int(growth * hidden), max_hidden)
self.lstm = BLSTM(chin, bi=not causal)
if rescale:
rescale_module(self, reference=rescale)
def valid_length(self, length):
"""
Return the nearest valid length to use with the model so that
there are no time steps left over in the convolutions, i.e. for every
layer, (input length - kernel_size) % stride == 0.
If the mixture has a valid length, the estimated sources
will have exactly the same length.
"""
length = math.ceil(length * self.resample)
for _ in range(self.depth):
length = math.ceil((length - self.kernel_size) / self.stride) + 1
length = max(length, 1)
for _ in range(self.depth):
length = (length - 1) * self.stride + self.kernel_size
length = int(math.ceil(length / self.resample))
return int(length)
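    # Worked example (a sketch): with the defaults above (depth=5, kernel_size=8,
    # stride=4, resample=4), valid_length(1) evaluates to 597 samples, i.e.
    # roughly 37 ms of audio at 16 kHz -- the smallest frame the model can
    # process without leftover time steps.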
@property
def total_stride(self):
return self.stride ** self.depth // self.resample
def forward(self, mix):
if mix.dim() == 2:
mix = mix.unsqueeze(1)
if self.normalize:
mono = mix.mean(dim=1, keepdim=True)
std = mono.std(dim=-1, keepdim=True)
mix = mix / (self.floor + std)
else:
std = 1
length = mix.shape[-1]
x = mix
x = F.pad(x, (0, self.valid_length(length) - length))
if self.resample == 2:
x = upsample2(x)
elif self.resample == 4:
x = upsample2(x)
x = upsample2(x)
skips = []
for encode in self.encoder:
x = encode(x)
skips.append(x)
x = x.permute(2, 0, 1)
x, _ = self.lstm(x)
x = x.permute(1, 2, 0)
for decode in self.decoder:
skip = skips.pop(-1)
x = x + skip[..., :x.shape[-1]]
x = decode(x)
if self.resample == 2:
x = downsample2(x)
elif self.resample == 4:
x = downsample2(x)
x = downsample2(x)
x = x[..., :length]
return std * x
def fast_conv(conv, x):
"""
Faster convolution evaluation if either kernel size is 1
or length of sequence is 1.
"""
batch, chin, length = x.shape
chout, chin, kernel = conv.weight.shape
assert batch == 1
if kernel == 1:
x = x.view(chin, length)
out = th.addmm(conv.bias.view(-1, 1),
conv.weight.view(chout, chin), x)
elif length == kernel:
x = x.view(chin * kernel, 1)
out = th.addmm(conv.bias.view(-1, 1),
conv.weight.view(chout, chin * kernel), x)
else:
out = conv(x)
return out.view(batch, chout, -1)
class DemucsStreamer:
"""
Streaming implementation for Demucs. It supports being fed with any amount
of audio at a time. You will get back as much audio as possible at that
point.
Args:
- demucs (Demucs): Demucs model.
- dry (float): amount of dry (e.g. input) signal to keep. 0 is maximum
noise removal, 1 just returns the input signal. Small values > 0
allow limiting distortions.
- num_frames (int): number of frames to process at once. Higher values
will increase overall latency but improve the real time factor.
- resample_lookahead (int): extra lookahead used for the resampling.
- resample_buffer (int): size of the buffer of previous inputs/outputs
kept for resampling.
"""
def __init__(self, demucs,
dry=0,
num_frames=1,
resample_lookahead=64,
resample_buffer=256):
device = next(iter(demucs.parameters())).device
self.demucs = demucs
self.lstm_state = None
self.conv_state = None
self.dry = dry
self.resample_lookahead = resample_lookahead
resample_buffer = min(demucs.total_stride, resample_buffer)
self.resample_buffer = resample_buffer
self.frame_length = demucs.valid_length(1) + \
demucs.total_stride * (num_frames - 1)
self.total_length = self.frame_length + self.resample_lookahead
self.stride = demucs.total_stride * num_frames
self.resample_in = th.zeros(demucs.chin, resample_buffer, device=device)
self.resample_out = th.zeros(
demucs.chin, resample_buffer, device=device
)
self.frames = 0
self.total_time = 0
self.variance = 0
self.pending = th.zeros(demucs.chin, 0, device=device)
bias = demucs.decoder[0][2].bias
weight = demucs.decoder[0][2].weight
chin, chout, kernel = weight.shape
self._bias = bias.view(-1, 1).repeat(1, kernel).view(-1, 1)
self._weight = weight.permute(1, 2, 0).contiguous()
def reset_time_per_frame(self):
self.total_time = 0
self.frames = 0
@property
def time_per_frame(self):
return self.total_time / self.frames
def flush(self):
"""
Flush remaining audio by padding it with zero. Call this
when you have no more input and want to get back the last chunk of audio.
"""
pending_length = self.pending.shape[1]
padding = th.zeros(
self.demucs.chin, self.total_length, device=self.pending.device
)
out = self.feed(padding)
return out[:, :pending_length]
def feed(self, wav):
"""
Apply the model to mix using true real time evaluation.
Normalization is done online as is the resampling.
"""
begin = time.time()
demucs = self.demucs
resample_buffer = self.resample_buffer
stride = self.stride
resample = demucs.resample
if wav.dim() != 2:
raise ValueError("input wav should be two dimensional.")
chin, _ = wav.shape
if chin != demucs.chin:
raise ValueError(f"Expected {demucs.chin} channels, got {chin}")
self.pending = th.cat([self.pending, wav], dim=1)
outs = []
while self.pending.shape[1] >= self.total_length:
self.frames += 1
frame = self.pending[:, :self.total_length]
dry_signal = frame[:, :stride]
if demucs.normalize:
mono = frame.mean(0)
variance = (mono**2).mean()
self.variance = variance / self.frames + \
(1 - 1 / self.frames) * self.variance
frame = frame / (demucs.floor + math.sqrt(self.variance))
frame = th.cat([self.resample_in, frame], dim=-1)
self.resample_in[:] = frame[:, stride - resample_buffer:stride]
if resample == 4:
frame = upsample2(upsample2(frame))
elif resample == 2:
frame = upsample2(frame)
# remove pre sampling buffer
frame = frame[:, resample * resample_buffer:]
# remove extra samples after window
frame = frame[:, :resample * self.frame_length]
out, extra = self._separate_frame(frame)
padded_out = th.cat([self.resample_out, out, extra], 1)
self.resample_out[:] = out[:, -resample_buffer:]
if resample == 4:
out = downsample2(downsample2(padded_out))
elif resample == 2:
out = downsample2(padded_out)
else:
out = padded_out
out = out[:, resample_buffer // resample:]
out = out[:, :stride]
if demucs.normalize:
out *= math.sqrt(self.variance)
out = self.dry * dry_signal + (1 - self.dry) * out
outs.append(out)
self.pending = self.pending[:, stride:]
self.total_time += time.time() - begin
if outs:
out = th.cat(outs, 1)
else:
out = th.zeros(chin, 0, device=wav.device)
return out
def _separate_frame(self, frame):
demucs = self.demucs
skips = []
next_state = []
first = self.conv_state is None
stride = self.stride * demucs.resample
x = frame[None]
for idx, encode in enumerate(demucs.encoder):
stride //= demucs.stride
length = x.shape[2]
if idx == demucs.depth - 1:
# This is slightly faster for the last conv
x = fast_conv(encode[0], x)
x = encode[1](x)
x = fast_conv(encode[2], x)
x = encode[3](x)
else:
if not first:
prev = self.conv_state.pop(0)
prev = prev[..., stride:]
tgt = (length - demucs.kernel_size) // demucs.stride + 1
missing = tgt - prev.shape[-1]
offset = length - demucs.kernel_size - \
demucs.stride * (missing - 1)
x = x[..., offset:]
x = encode[1](encode[0](x))
x = fast_conv(encode[2], x)
x = encode[3](x)
if not first:
x = th.cat([prev, x], -1)
next_state.append(x)
skips.append(x)
x = x.permute(2, 0, 1)
x, self.lstm_state = demucs.lstm(x, self.lstm_state)
x = x.permute(1, 2, 0)
# In the following, x contains only correct samples, i.e. the one
# for which each time position is covered by two window of the upper
# layer. extra contains extra samples to the right, and is used only as
# a better padding for the online resampling.
extra = None
for idx, decode in enumerate(demucs.decoder):
skip = skips.pop(-1)
x += skip[..., :x.shape[-1]]
x = fast_conv(decode[0], x)
x = decode[1](x)
if extra is not None:
skip = skip[..., x.shape[-1]:]
extra += skip[..., :extra.shape[-1]]
extra = decode[2](decode[1](decode[0](extra)))
x = decode[2](x)
next_state.append(
x[..., -demucs.stride:] - decode[2].bias.view(-1, 1)
)
if extra is None:
extra = x[..., -demucs.stride:]
else:
extra[..., :demucs.stride] += next_state[-1]
x = x[..., :-demucs.stride]
if not first:
prev = self.conv_state.pop(0)
x[..., :demucs.stride] += prev
if idx != demucs.depth - 1:
x = decode[3](x)
extra = decode[3](extra)
self.conv_state = next_state
return x[0], extra[0]
def test():
import argparse
parser = argparse.ArgumentParser(
"denoiser.demucs",
description="Benchmark the streaming Demucs implementation, as well as "
"checking the delta with the offline implementation.")
parser.add_argument("--depth", default=5, type=int)
parser.add_argument("--resample", default=4, type=int)
parser.add_argument("--hidden", default=48, type=int)
parser.add_argument("--sample_rate", default=16000, type=float)
parser.add_argument("--device", default="cpu")
parser.add_argument("-t", "--num_threads", type=int)
parser.add_argument("-f", "--num_frames", type=int, default=1)
args = parser.parse_args()
if args.num_threads:
th.set_num_threads(args.num_threads)
sr = args.sample_rate
sr_ms = sr / 1000
demucs = Demucs(
depth=args.depth, hidden=args.hidden, resample=args.resample
).to(args.device)
x = th.randn(1, int(sr * 4)).to(args.device)
out = demucs(x[None])[0]
streamer = DemucsStreamer(demucs, num_frames=args.num_frames)
out_rt = []
frame_size = streamer.total_length
with th.no_grad():
while x.shape[1] > 0:
out_rt.append(streamer.feed(x[:, :frame_size]))
x = x[:, frame_size:]
frame_size = streamer.demucs.total_stride
out_rt.append(streamer.flush())
out_rt = th.cat(out_rt, 1)
model_size = sum(p.numel() for p in demucs.parameters()) * 4 / 2**20
initial_lag = streamer.total_length / sr_ms
tpf = 1000 * streamer.time_per_frame
print(f"model size: {model_size:.1f}MB, ", end='')
print(f"delta batch/streaming: {th.norm(out - out_rt) / th.norm(out):.2%}")
print(f"initial lag: {initial_lag:.1f}ms, ", end='')
print(f"stride: {streamer.stride * args.num_frames / sr_ms:.1f}ms")
print(f"time per frame: {tpf:.1f}ms, ", end='')
rtf = (1000 * streamer.time_per_frame) / (streamer.stride / sr_ms)
print(f"RTF: {rtf:.2f}")
print(f"Total lag with computation: {initial_lag + tpf:.1f}ms")
if __name__ == "__main__":
test()
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/denoiser/demucs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import contextlib
import wave
try:
import webrtcvad
except ImportError:
raise ImportError("Please install py-webrtcvad: pip install webrtcvad")
import argparse
import os
import logging
from tqdm import tqdm
AUDIO_SUFFIX = '.wav'
FS_MS = 30
SCALE = 6e-5
THRESHOLD = 0.3
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (8000, 16000, 32000, 48000)
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate
def write_wave(path, audio, sample_rate):
"""Writes a .wav file.
Takes path, PCM audio data, and sample rate.
"""
with contextlib.closing(wave.open(path, 'wb')) as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.writeframes(audio)
class Frame(object):
"""Represents a "frame" of audio data."""
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
def frame_generator(frame_duration_ms, audio, sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
"""
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)  # bytes per frame (16-bit mono PCM)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0  # frame duration in seconds (2 bytes per sample)
while offset + n < len(audio):
yield Frame(audio[offset:offset + n], timestamp, duration)
timestamp += duration
offset += n
def vad_collector(sample_rate, frame_duration_ms,
padding_duration_ms, vad, frames):
"""Filters out non-voiced audio frames.
Given a webrtcvad.Vad and a source of audio frames, yields only
the voiced audio.
Uses a padded, sliding window algorithm over the audio frames.
When more than 90% of the frames in the window are voiced (as
reported by the VAD), the collector triggers and begins yielding
audio frames. Then the collector waits until 90% of the frames in
the window are unvoiced to detrigger.
The window is padded at the front and back to provide a small
amount of silence or the beginnings/endings of speech around the
voiced frames.
Arguments:
sample_rate - The audio sample rate, in Hz.
frame_duration_ms - The frame duration in milliseconds.
padding_duration_ms - The amount to pad the window, in milliseconds.
vad - An instance of webrtcvad.Vad.
frames - a source of audio frames (sequence or generator).
Returns: A generator that yields PCM audio data.
"""
num_padding_frames = int(padding_duration_ms / frame_duration_ms)
# We use a deque for our sliding window/ring buffer.
ring_buffer = collections.deque(maxlen=num_padding_frames)
# We have two states: TRIGGERED and NOTTRIGGERED. We start in the
# NOTTRIGGERED state.
triggered = False
voiced_frames = []
for frame in frames:
is_speech = vad.is_speech(frame.bytes, sample_rate)
# sys.stdout.write('1' if is_speech else '0')
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
# If we're NOTTRIGGERED and more than 90% of the frames in
# the ring buffer are voiced frames, then enter the
# TRIGGERED state.
if num_voiced > 0.9 * ring_buffer.maxlen:
triggered = True
# We want to yield all the audio we see from now until
# we are NOTTRIGGERED, but we have to start with the
# audio that's already in the ring buffer.
for f, _ in ring_buffer:
voiced_frames.append(f)
ring_buffer.clear()
else:
# We're in the TRIGGERED state, so collect the audio data
# and add it to the ring buffer.
voiced_frames.append(frame)
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
# If more than 90% of the frames in the ring buffer are
# unvoiced, then enter NOTTRIGGERED and yield whatever
# audio we've collected.
if num_unvoiced > 0.9 * ring_buffer.maxlen:
triggered = False
yield [b''.join([f.bytes for f in voiced_frames]),
voiced_frames[0].timestamp, voiced_frames[-1].timestamp]
ring_buffer.clear()
voiced_frames = []
# If we have any leftover voiced audio when we run out of input,
# yield it.
if voiced_frames:
yield [b''.join([f.bytes for f in voiced_frames]),
voiced_frames[0].timestamp, voiced_frames[-1].timestamp]
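# Illustrative end-to-end sketch (assumes "utt.wav" is 16 kHz mono 16-bit PCM;
# not part of the original module):
#
#   audio, sr = read_wave("utt.wav")
#   vad = webrtcvad.Vad(3)
#   frames = list(frame_generator(30, audio, sr))
#   for pcm, t_start, t_end in vad_collector(sr, 30, 300, vad, frames):
#       write_wave(f"chunk_{t_start:.2f}.wav", pcm, sr)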
def main(args):
# create output folder
try:
cmd = f"mkdir -p {args.out_path}"
os.system(cmd)
except Exception:
logging.error("Can not create output folder")
exit(-1)
# build vad object
vad = webrtcvad.Vad(int(args.agg))
# iterating over wavs in dir
for file in tqdm(os.listdir(args.in_path)):
if file.endswith(AUDIO_SUFFIX):
audio_inpath = os.path.join(args.in_path, file)
audio_outpath = os.path.join(args.out_path, file)
audio, sample_rate = read_wave(audio_inpath)
frames = frame_generator(FS_MS, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
merge_segments = list()
timestamp_start = 0.0
timestamp_end = 0.0
# removing start, end, and long sequences of sils
for i, segment in enumerate(segments):
merge_segments.append(segment[0])
if i and timestamp_start:
sil_duration = segment[1] - timestamp_end
if sil_duration > THRESHOLD:
merge_segments.append(int(THRESHOLD / SCALE)*(b'\x00'))
else:
merge_segments.append(int((sil_duration / SCALE))*(b'\x00'))
timestamp_start = segment[1]
timestamp_end = segment[2]
segment = b''.join(merge_segments)
write_wave(audio_outpath, segment, sample_rate)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Apply vad to a file of fils.')
parser.add_argument('in_path', type=str, help='Path to the input files')
parser.add_argument('out_path', type=str,
help='Path to save the processed files')
parser.add_argument('--agg', type=int, default=3,
help='The level of aggressiveness of the VAD: [0-3]')
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/speech_synthesis/preprocessing/vad/__init__.py |
from . import criterions, models, tasks # noqa
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/__init__.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Score raw text with a trained model.
"""
from collections import namedtuple
import logging
from multiprocessing import Pool
import sys
import os
import random
import numpy as np
import sacrebleu
import torch
from fairseq import checkpoint_utils, options, utils
logger = logging.getLogger("fairseq_cli.drnmt_rerank")
logger.setLevel(logging.INFO)
Batch = namedtuple("Batch", "ids src_tokens src_lengths")
pool_init_variables = {}
def init_loaded_scores(mt_scores, model_scores, hyp, ref):
global pool_init_variables
pool_init_variables["mt_scores"] = mt_scores
pool_init_variables["model_scores"] = model_scores
pool_init_variables["hyp"] = hyp
pool_init_variables["ref"] = ref
def parse_fairseq_gen(filename, task):
source = {}
hypos = {}
scores = {}
with open(filename, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line.startswith("S-"): # source
uid, text = line.split("\t", 1)
uid = int(uid[2:])
source[uid] = text
elif line.startswith("D-"): # hypo
uid, score, text = line.split("\t", 2)
uid = int(uid[2:])
if uid not in hypos:
hypos[uid] = []
scores[uid] = []
hypos[uid].append(text)
scores[uid].append(float(score))
else:
continue
source_out = [source[i] for i in range(len(hypos))]
hypos_out = [h for i in range(len(hypos)) for h in hypos[i]]
scores_out = [s for i in range(len(scores)) for s in scores[i]]
return source_out, hypos_out, scores_out
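# parse_fairseq_gen expects the plain-text output of fairseq-generate /
# fairseq-interactive, with tab-separated fields, e.g. (illustrative lines):
#
#   S-0<TAB>the source sentence
#   D-0<TAB>-0.41<TAB>first hypothesis for sentence 0
#   D-0<TAB>-0.63<TAB>second hypothesis for sentence 0
#
# It returns one source per sentence id plus the flattened hypotheses and
# their model scores, in sentence-id order.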
def read_target(filename):
with open(filename, "r", encoding="utf-8") as f:
output = [line.strip() for line in f]
return output
def make_batches(args, src, hyp, task, max_positions, encode_fn):
assert len(src) * args.beam == len(
hyp
), f"Expect {len(src) * args.beam} hypotheses for {len(src)} source sentences with beam size {args.beam}. Got {len(hyp)} hypotheses intead."
hyp_encode = [
task.source_dictionary.encode_line(encode_fn(h), add_if_not_exist=False).long()
for h in hyp
]
if task.cfg.include_src:
src_encode = [
task.source_dictionary.encode_line(
encode_fn(s), add_if_not_exist=False
).long()
for s in src
]
tokens = [(src_encode[i // args.beam], h) for i, h in enumerate(hyp_encode)]
lengths = [(t1.numel(), t2.numel()) for t1, t2 in tokens]
else:
tokens = [(h,) for h in hyp_encode]
lengths = [(h.numel(),) for h in hyp_encode]
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
ids=batch["id"],
src_tokens=batch["net_input"]["src_tokens"],
src_lengths=batch["net_input"]["src_lengths"],
)
def decode_rerank_scores(args):
if args.max_tokens is None and args.batch_size is None:
args.batch_size = 1
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load ensemble
logger.info("loading model(s) from {}".format(args.path))
models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path], arg_overrides=eval(args.model_overrides),
)
for model in models:
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Initialize generator
generator = task.build_generator(args)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(args)
bpe = task.build_bpe(args)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
src, hyp, mt_scores = parse_fairseq_gen(args.in_text, task)
model_scores = {}
logger.info("decode reranker score")
for batch in make_batches(args, src, hyp, task, max_positions, encode_fn):
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths},
}
scores = task.inference_step(generator, models, sample)
for id, sc in zip(batch.ids.tolist(), scores.tolist()):
model_scores[id] = sc[0]
model_scores = [model_scores[i] for i in range(len(model_scores))]
return src, hyp, mt_scores, model_scores
def get_score(mt_s, md_s, w1, lp, tgt_len):
return mt_s / (tgt_len ** lp) * w1 + md_s
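# Worked example (illustrative): with fw_weight w1=1.0, lenpen lp=0.5 and a
# 16-token hypothesis whose MT log-prob is -8.0 and reranker score is 2.5:
#   get_score(-8.0, 2.5, 1.0, 0.5, 16) = -8.0 / 16**0.5 * 1.0 + 2.5 = 0.5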
def get_best_hyps(mt_scores, md_scores, hypos, fw_weight, lenpen, beam):
assert len(mt_scores) == len(md_scores) and len(mt_scores) == len(hypos)
hypo_scores = []
best_hypos = []
best_scores = []
offset = 0
for i in range(len(hypos)):
tgt_len = len(hypos[i].split())
hypo_scores.append(
get_score(mt_scores[i], md_scores[i], fw_weight, lenpen, tgt_len)
)
if (i + 1) % beam == 0:
max_i = np.argmax(hypo_scores)
best_hypos.append(hypos[offset + max_i])
best_scores.append(hypo_scores[max_i])
hypo_scores = []
offset += beam
return best_hypos, best_scores
def eval_metric(args, hypos, ref):
if args.metric == "bleu":
score = sacrebleu.corpus_bleu(hypos, [ref]).score
else:
score = sacrebleu.corpus_ter(hypos, [ref]).score
return score
def score_target_hypo(args, fw_weight, lp):
mt_scores = pool_init_variables["mt_scores"]
model_scores = pool_init_variables["model_scores"]
hyp = pool_init_variables["hyp"]
ref = pool_init_variables["ref"]
best_hypos, _ = get_best_hyps(
mt_scores, model_scores, hyp, fw_weight, lp, args.beam
)
rerank_eval = None
if ref:
rerank_eval = eval_metric(args, best_hypos, ref)
print(f"fw_weight {fw_weight}, lenpen {lp}, eval {rerank_eval}")
return rerank_eval
def print_result(best_scores, best_hypos, output_file):
for i, (s, h) in enumerate(zip(best_scores, best_hypos)):
print(f"{i}\t{s}\t{h}", file=output_file)
def main(args):
utils.import_user_module(args)
src, hyp, mt_scores, model_scores = decode_rerank_scores(args)
assert (
not args.tune or args.target_text is not None
), "--target-text has to be set when tuning weights"
if args.target_text:
ref = read_target(args.target_text)
assert len(src) == len(
ref
), f"different numbers of source and target sentences ({len(src)} vs. {len(ref)})"
orig_best_hypos = [hyp[i] for i in range(0, len(hyp), args.beam)]
orig_eval = eval_metric(args, orig_best_hypos, ref)
if args.tune:
logger.info("tune weights for reranking")
random_params = np.array(
[
[
random.uniform(
args.lower_bound_fw_weight, args.upper_bound_fw_weight
),
random.uniform(args.lower_bound_lenpen, args.upper_bound_lenpen),
]
for k in range(args.num_trials)
]
)
logger.info("launching pool")
with Pool(
32,
initializer=init_loaded_scores,
initargs=(mt_scores, model_scores, hyp, ref),
) as p:
rerank_scores = p.starmap(
score_target_hypo,
[
(args, random_params[i][0], random_params[i][1],)
for i in range(args.num_trials)
],
)
if args.metric == "bleu":
best_index = np.argmax(rerank_scores)
else:
best_index = np.argmin(rerank_scores)
best_fw_weight = random_params[best_index][0]
best_lenpen = random_params[best_index][1]
else:
assert (
args.lenpen is not None and args.fw_weight is not None
), "--lenpen and --fw-weight should be set"
best_fw_weight, best_lenpen = args.fw_weight, args.lenpen
best_hypos, best_scores = get_best_hyps(
mt_scores, model_scores, hyp, best_fw_weight, best_lenpen, args.beam
)
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(
args.results_path, "generate-{}.txt".format(args.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as o:
print_result(best_scores, best_hypos, o)
else:
print_result(best_scores, best_hypos, sys.stdout)
if args.target_text:
rerank_eval = eval_metric(args, best_hypos, ref)
print(f"before reranking, {args.metric.upper()}:", orig_eval)
print(
f"after reranking with fw_weight={best_fw_weight}, lenpen={best_lenpen}, {args.metric.upper()}:",
rerank_eval,
)
def cli_main():
parser = options.get_generation_parser(interactive=True)
parser.add_argument(
"--in-text",
default=None,
required=True,
help="text from fairseq-interactive output, containing source sentences and hypotheses",
)
parser.add_argument("--target-text", default=None, help="reference text")
parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu")
parser.add_argument(
"--tune",
action="store_true",
help="if set, tune weights on fw scores and lenpen instead of applying fixed weights for reranking",
)
parser.add_argument(
"--lower-bound-fw-weight",
default=0.0,
type=float,
help="lower bound of search space",
)
parser.add_argument(
"--upper-bound-fw-weight",
default=3,
type=float,
help="upper bound of search space",
)
parser.add_argument(
"--lower-bound-lenpen",
default=0.0,
type=float,
help="lower bound of search space",
)
parser.add_argument(
"--upper-bound-lenpen",
default=3,
type=float,
help="upper bound of search space",
)
parser.add_argument(
"--fw-weight", type=float, default=None, help="weight on the fw model score"
)
parser.add_argument(
"--num-trials",
default=1000,
type=int,
help="number of trials to do for random search",
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/drnmt_rerank.py |
from .discriminative_reranking_task import DiscriminativeRerankingNMTTask
__all__ = [
"DiscriminativeRerankingNMTTask",
]
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/tasks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import logging
import os
import numpy as np
import torch
from fairseq.logging import metrics
from fairseq.data import (
ConcatDataset,
ConcatSentencesDataset,
data_utils,
Dictionary,
IdDataset,
indexed_dataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset,
TokenBlockDataset,
)
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II, MISSING
EVAL_BLEU_ORDER = 4
TARGET_METRIC_CHOICES = ChoiceEnum(["bleu", "ter"])
logger = logging.getLogger(__name__)
@dataclass
class DiscriminativeRerankingNMTConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
num_data_splits: int = field(
default=1, metadata={"help": "total number of data splits"}
)
no_shuffle: bool = field(
default=False, metadata={"help": "do not shuffle training data"}
)
max_positions: int = field(
default=512, metadata={"help": "number of positional embeddings to learn"}
)
include_src: bool = field(
default=False, metadata={"help": "include source sentence"}
)
mt_beam: int = field(default=50, metadata={"help": "beam size of input hypotheses"})
eval_target_metric: bool = field(
default=False,
metadata={"help": "evaluation with the target metric during validation"},
)
target_metric: TARGET_METRIC_CHOICES = field(
default="bleu", metadata={"help": "name of the target metric to optimize for"}
)
train_subset: str = field(
default=II("dataset.train_subset"),
metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
)
seed: int = field(
default=II("common.seed"),
metadata={"help": "pseudo random number generator seed"},
)
class RerankerScorer(object):
"""Scores the target for a given (source (optional), target) input."""
def __init__(self, args, mt_beam):
self.mt_beam = mt_beam
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
assert len(models) == 1, "does not support model ensemble"
model = models[0]
bs = net_input["src_tokens"].shape[0]
assert (
model.joint_classification == "none" or bs % self.mt_beam == 0
), f"invalid batch size ({bs}) for joint classification with beam size ({self.mt_beam})"
model.eval()
logits = model(**net_input)
batch_out = model.sentence_forward(logits, net_input["src_tokens"])
if model.joint_classification == "sent":
batch_out = model.joint_forward(
batch_out.view(self.mt_beam, bs // self.mt_beam, -1)
)
scores = model.classification_forward(
batch_out.view(bs, 1, -1)
) # input: B x T x C
return scores
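# Note: when joint_classification == "sent", the batch size must be a multiple
# of mt_beam so the sentence-level representations can be reshaped to
# (mt_beam, batch_size // mt_beam, -1) and scored jointly across the beam.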
@register_task(
"discriminative_reranking_nmt", dataclass=DiscriminativeRerankingNMTConfig
)
class DiscriminativeRerankingNMTTask(FairseqTask):
"""
Translation rerank task.
The input can be either (src, tgt) sentence pairs or tgt sentence only.
"""
cfg: DiscriminativeRerankingNMTConfig
def __init__(self, cfg: DiscriminativeRerankingNMTConfig, data_dictionary=None):
super().__init__(cfg)
self.dictionary = data_dictionary
self._max_positions = cfg.max_positions
# args.tokens_per_sample = self._max_positions
# self.num_classes = 1 # for model
@classmethod
def load_dictionary(cls, cfg, filename):
"""Load the dictionary from the filename"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>") # for loading pretrained XLMR model
return dictionary
@classmethod
def setup_task(cls, cfg: DiscriminativeRerankingNMTConfig, **kwargs):
# load data dictionary (assume joint dictionary)
data_path = cfg.data
data_dict = cls.load_dictionary(
cfg, os.path.join(data_path, "input_src/dict.txt")
)
logger.info("[input] src dictionary: {} types".format(len(data_dict)))
return DiscriminativeRerankingNMTTask(cfg, data_dict)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
if self.cfg.data.endswith("1"):
data_shard = (epoch - 1) % self.cfg.num_data_splits + 1
data_path = self.cfg.data[:-1] + str(data_shard)
else:
data_path = self.cfg.data
def get_path(type, data_split):
return os.path.join(data_path, str(type), data_split)
def make_dataset(type, dictionary, data_split, combine):
split_path = get_path(type, data_split)
dataset = data_utils.load_indexed_dataset(
split_path,
dictionary,
combine=combine,
)
return dataset
def load_split(data_split, metric):
input_src = None
if self.cfg.include_src:
input_src = make_dataset(
"input_src", self.dictionary, data_split, combine=False
)
assert input_src is not None, "could not find dataset: {}".format(
get_path("input_src", data_split)
)
input_tgt = make_dataset(
"input_tgt", self.dictionary, data_split, combine=False
)
assert input_tgt is not None, "could not find dataset: {}".format(
get_path("input_tgt", data_split)
)
label_path = f"{get_path(metric, data_split)}.{metric}"
assert os.path.exists(label_path), f"could not find dataset: {label_path}"
np_labels = np.loadtxt(label_path)
if self.cfg.target_metric == "ter":
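                # TER is an error rate (lower is better), so negate it to get a
                # reward-like label that the reranker can maximize like BLEU.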
np_labels = -np_labels
label = RawLabelDataset(np_labels)
return input_src, input_tgt, label
src_datasets = []
tgt_datasets = []
label_datasets = []
if split == self.cfg.train_subset:
for k in itertools.count():
split_k = "train" + (str(k) if k > 0 else "")
prefix = os.path.join(data_path, "input_tgt", split_k)
if not indexed_dataset.dataset_exists(prefix, impl=None):
if k > 0:
break
else:
raise FileNotFoundError(f"Dataset not found: {prefix}")
input_src, input_tgt, label = load_split(
split_k, self.cfg.target_metric
)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
else:
input_src, input_tgt, label = load_split(split, self.cfg.target_metric)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
if len(tgt_datasets) == 1:
input_tgt, label = tgt_datasets[0], label_datasets[0]
if self.cfg.include_src:
input_src = src_datasets[0]
else:
input_tgt = ConcatDataset(tgt_datasets)
label = ConcatDataset(label_datasets)
if self.cfg.include_src:
input_src = ConcatDataset(src_datasets)
input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)
if self.cfg.include_src:
input_src = PrependTokenDataset(input_src, self.dictionary.bos())
input_src = TruncateDataset(input_src, self.cfg.max_positions)
src_lengths = NumelDataset(input_src, reduce=False)
src_tokens = ConcatSentencesDataset(input_src, input_tgt)
else:
src_tokens = PrependTokenDataset(input_tgt, self.dictionary.bos())
src_lengths = NumelDataset(src_tokens, reduce=False)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens,
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths,
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
"target": label,
}
dataset = NestedDictionaryDataset(
dataset,
sizes=[src_tokens.sizes],
)
assert (
len(dataset) % self.cfg.mt_beam == 0
), "dataset size (%d) is not a multiple of beam size (%d)" % (
len(dataset),
self.cfg.mt_beam,
)
# no need to shuffle valid/test sets
if not self.cfg.no_shuffle and split == self.cfg.train_subset:
            # need to keep all hypotheses of the same source sentence together
start_idx = np.arange(0, len(dataset), self.cfg.mt_beam)
with data_utils.numpy_seed(self.cfg.seed + epoch):
np.random.shuffle(start_idx)
idx = np.arange(0, self.cfg.mt_beam)
shuffle = np.tile(idx, (len(start_idx), 1)).reshape(-1) + np.tile(
start_idx, (self.cfg.mt_beam, 1)
).transpose().reshape(-1)
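            # e.g. with mt_beam=2 and 4 examples, start_idx is [0, 2]; if it shuffles to
            # [2, 0], the combined index becomes [2, 3, 0, 1]: the hypotheses of each
            # source sentence stay adjacent while the groups themselves are permuted.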
dataset = SortDataset(
dataset,
sort_order=[shuffle],
)
logger.info(f"Loaded {split} with #samples: {len(dataset)}")
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
assert not self.cfg.include_src or len(src_tokens[0]) == 2
input_src = None
if self.cfg.include_src:
input_src = TokenBlockDataset(
[t[0] for t in src_tokens],
[l[0] for l in src_lengths],
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
)
input_src = PrependTokenDataset(input_src, self.dictionary.bos())
input_src = TruncateDataset(input_src, self.cfg.max_positions)
input_tgt = TokenBlockDataset(
[t[-1] for t in src_tokens],
[l[-1] for l in src_lengths],
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
)
input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)
if self.cfg.include_src:
src_tokens = ConcatSentencesDataset(input_src, input_tgt)
src_lengths = NumelDataset(input_src, reduce=False)
else:
input_tgt = PrependTokenDataset(input_tgt, self.dictionary.bos())
src_tokens = input_tgt
src_lengths = NumelDataset(src_tokens, reduce=False)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens,
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths,
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
}
return NestedDictionaryDataset(
dataset,
sizes=[src_tokens.sizes],
)
def build_model(self, cfg: FairseqDataclass, from_checkpoint: bool = False):
return super().build_model(cfg)
def build_generator(self, args):
return RerankerScorer(args, mt_beam=self.cfg.mt_beam)
def max_positions(self):
return self._max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def create_dummy_batch(self, device):
        # BLEU targets have EVAL_BLEU_ORDER * 2 + 3 columns (sentence score, sys/ref lengths,
        # n-gram counts and totals); TER targets have 3 (score, num_edits, ref_len), matching
        # the shapes checked in valid_step below.
        dummy_target = (
            torch.zeros(self.cfg.mt_beam, EVAL_BLEU_ORDER * 2 + 3).long().to(device)
            if self.cfg.target_metric == "bleu"
            else torch.zeros(self.cfg.mt_beam, 3).long().to(device)
        )
return {
"id": torch.zeros(self.cfg.mt_beam, 1).long().to(device),
"net_input": {
"src_tokens": torch.zeros(self.cfg.mt_beam, 4).long().to(device),
"src_lengths": torch.ones(self.cfg.mt_beam, 1).long().to(device),
},
"nsentences": 0,
"ntokens": 0,
"target": dummy_target,
}
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
if ignore_grad and sample is None:
sample = self.create_dummy_batch(model.device)
return super().train_step(
sample, model, criterion, optimizer, update_num, ignore_grad
)
def valid_step(self, sample, model, criterion):
if sample is None:
sample = self.create_dummy_batch(model.device)
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if not self.cfg.eval_target_metric:
return loss, sample_size, logging_output
scores = logging_output["scores"]
if self.cfg.target_metric == "bleu":
assert sample["target"].shape[1] == EVAL_BLEU_ORDER * 2 + 3, (
"target does not contain enough information ("
+ str(sample["target"].shape[1])
+ "for evaluating BLEU"
)
max_id = torch.argmax(scores, dim=1)
select_id = max_id + torch.arange(
0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam
).to(max_id.device)
bleu_data = sample["target"][select_id, 1:].sum(0).data
logging_output["_bleu_sys_len"] = bleu_data[0]
logging_output["_bleu_ref_len"] = bleu_data[1]
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu_data[2 + i]
logging_output["_bleu_totals_" + str(i)] = bleu_data[
2 + EVAL_BLEU_ORDER + i
]
elif self.cfg.target_metric == "ter":
assert sample["target"].shape[1] == 3, (
"target does not contain enough information ("
+ str(sample["target"].shape[1])
+ "for evaluating TER"
)
max_id = torch.argmax(scores, dim=1)
select_id = max_id + torch.arange(
0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam
).to(max_id.device)
ter_data = sample["target"][select_id, 1:].sum(0).data
logging_output["_ter_num_edits"] = -ter_data[0]
logging_output["_ter_ref_len"] = -ter_data[1]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if not self.cfg.eval_target_metric:
return
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
if self.cfg.target_metric == "bleu":
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth,
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
elif self.cfg.target_metric == "ter":
num_edits = sum_logs("_ter_num_edits")
ref_len = sum_logs("_ter_ref_len")
if ref_len > 0:
metrics.log_scalar("_ter_num_edits", num_edits)
metrics.log_scalar("_ter_ref_len", ref_len)
def compute_ter(meters):
score = meters["_ter_num_edits"].sum / meters["_ter_ref_len"].sum
return round(score.item(), 2)
metrics.log_derived("ter", compute_ter)
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/tasks/discriminative_reranking_task.py |
from .discriminative_reranking_model import DiscriminativeNMTReranker
__all__ = [
"DiscriminativeNMTReranker",
]
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/models/__init__.py |
from dataclasses import dataclass, field
import os
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
BaseFairseqModel,
register_model,
)
from fairseq.models.roberta.model import RobertaClassificationHead
from fairseq.modules import (
LayerNorm,
TransformerSentenceEncoder,
TransformerSentenceEncoderLayer,
)
ACTIVATION_FN_CHOICES = ChoiceEnum(utils.get_available_activation_fns())
JOINT_CLASSIFICATION_CHOICES = ChoiceEnum(["none", "sent"])
SENTENCE_REP_CHOICES = ChoiceEnum(["head", "meanpool", "maxpool"])
def update_init_roberta_model_state(state):
"""
update the state_dict of a Roberta model for initializing
weights of the BertRanker
"""
for k in list(state.keys()):
if ".lm_head." in k or "version" in k:
del state[k]
continue
# remove 'encoder/decoder.sentence_encoder.' from the key
assert k.startswith("encoder.sentence_encoder.") or k.startswith(
"decoder.sentence_encoder."
), f"Cannot recognize parameter name {k}"
if "layernorm_embedding" in k:
new_k = k.replace(".layernorm_embedding.", ".emb_layer_norm.")
state[new_k[25:]] = state[k]
else:
state[k[25:]] = state[k]
del state[k]
class BaseRanker(nn.Module):
def __init__(self, args, task):
super().__init__()
self.separator_token = task.dictionary.eos()
self.padding_idx = task.dictionary.pad()
def forward(self, src_tokens):
raise NotImplementedError
def get_segment_labels(self, src_tokens):
segment_boundary = (src_tokens == self.separator_token).long()
segment_labels = (
segment_boundary.cumsum(dim=1)
- segment_boundary
- (src_tokens == self.padding_idx).long()
)
return segment_labels
def get_positions(self, src_tokens, segment_labels):
segment_positions = (
torch.arange(src_tokens.shape[1])
.to(src_tokens.device)
.repeat(src_tokens.shape[0], 1)
)
segment_boundary = (src_tokens == self.separator_token).long()
_, col_idx = (segment_positions * segment_boundary).nonzero(as_tuple=True)
col_idx = torch.cat([torch.zeros(1).type_as(col_idx), col_idx])
offset = torch.cat(
[
torch.zeros(1).type_as(segment_boundary),
segment_boundary.sum(dim=1).cumsum(dim=0)[:-1],
]
)
segment_positions -= col_idx[segment_labels + offset.unsqueeze(1)] * (
segment_labels != 0
)
padding_mask = src_tokens.ne(self.padding_idx)
segment_positions = (segment_positions + 1) * padding_mask.type_as(
segment_positions
) + self.padding_idx
return segment_positions
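    # Example (illustrative): with </s> as the separator and padded input
    # [a, b, </s>, c, d, e, </s>, <pad>], get_segment_labels returns
    # [0, 0, 0, 1, 1, 1, 1, 1] -- the source segment (including its </s>) is 0 and the
    # hypothesis segment is 1. get_positions then re-derives positions within each
    # segment (relative to the preceding separator) and shifts them past padding_idx,
    # mirroring fairseq's padding-aware positional indexing.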
class BertRanker(BaseRanker):
def __init__(self, args, task):
super(BertRanker, self).__init__(args, task)
init_model = getattr(args, "pretrained_model", "")
self.joint_layers = nn.ModuleList()
if os.path.isfile(init_model):
print(f"initialize weight from {init_model}")
from fairseq import hub_utils
x = hub_utils.from_pretrained(
os.path.dirname(init_model),
checkpoint_file=os.path.basename(init_model),
)
in_state_dict = x["models"][0].state_dict()
init_args = x["args"].model
num_positional_emb = init_args.max_positions + task.dictionary.pad() + 1
# follow the setup in roberta
self.model = TransformerSentenceEncoder(
padding_idx=task.dictionary.pad(),
vocab_size=len(task.dictionary),
num_encoder_layers=getattr(
args, "encoder_layers", init_args.encoder_layers
),
embedding_dim=init_args.encoder_embed_dim,
ffn_embedding_dim=init_args.encoder_ffn_embed_dim,
num_attention_heads=init_args.encoder_attention_heads,
dropout=init_args.dropout,
attention_dropout=init_args.attention_dropout,
activation_dropout=init_args.activation_dropout,
num_segments=2, # add language embeddings
max_seq_len=num_positional_emb,
offset_positions_by_padding=False,
encoder_normalize_before=True,
apply_bert_init=True,
activation_fn=init_args.activation_fn,
freeze_embeddings=args.freeze_embeddings,
n_trans_layers_to_freeze=args.n_trans_layers_to_freeze,
)
# still need to learn segment embeddings as we added a second language embedding
if args.freeze_embeddings:
for p in self.model.segment_embeddings.parameters():
p.requires_grad = False
update_init_roberta_model_state(in_state_dict)
print("loading weights from the pretrained model")
self.model.load_state_dict(
in_state_dict, strict=False
) # ignore mismatch in language embeddings
ffn_embedding_dim = init_args.encoder_ffn_embed_dim
num_attention_heads = init_args.encoder_attention_heads
dropout = init_args.dropout
attention_dropout = init_args.attention_dropout
activation_dropout = init_args.activation_dropout
activation_fn = init_args.activation_fn
classifier_embed_dim = getattr(
args, "embed_dim", init_args.encoder_embed_dim
)
if classifier_embed_dim != init_args.encoder_embed_dim:
self.transform_layer = nn.Linear(
init_args.encoder_embed_dim, classifier_embed_dim
)
else:
self.model = TransformerSentenceEncoder(
padding_idx=task.dictionary.pad(),
vocab_size=len(task.dictionary),
num_encoder_layers=args.encoder_layers,
embedding_dim=args.embed_dim,
ffn_embedding_dim=args.ffn_embed_dim,
num_attention_heads=args.attention_heads,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
max_seq_len=task.max_positions()
if task.max_positions()
else args.tokens_per_sample,
num_segments=2,
offset_positions_by_padding=False,
encoder_normalize_before=args.encoder_normalize_before,
apply_bert_init=args.apply_bert_init,
activation_fn=args.activation_fn,
)
classifier_embed_dim = args.embed_dim
ffn_embedding_dim = args.ffn_embed_dim
num_attention_heads = args.attention_heads
dropout = args.dropout
attention_dropout = args.attention_dropout
activation_dropout = args.activation_dropout
activation_fn = args.activation_fn
self.joint_classification = args.joint_classification
if args.joint_classification == "sent":
if args.joint_normalize_before:
self.joint_layer_norm = LayerNorm(classifier_embed_dim)
else:
self.joint_layer_norm = None
self.joint_layers = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=classifier_embed_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
)
for _ in range(args.num_joint_layers)
]
)
self.classifier = RobertaClassificationHead(
classifier_embed_dim,
classifier_embed_dim,
1, # num_classes
"tanh",
args.classifier_dropout,
)
def forward(self, src_tokens, src_lengths):
segment_labels = self.get_segment_labels(src_tokens)
positions = self.get_positions(src_tokens, segment_labels)
inner_states, _ = self.model(
tokens=src_tokens,
segment_labels=segment_labels,
last_state_only=True,
positions=positions,
)
return inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C
def sentence_forward(self, encoder_out, src_tokens=None, sentence_rep="head"):
# encoder_out: B x T x C
if sentence_rep == "head":
x = encoder_out[:, :1, :]
else: # 'meanpool', 'maxpool'
            assert src_tokens is not None, "meanpool/maxpool require src_tokens input"
segment_labels = self.get_segment_labels(src_tokens)
padding_mask = src_tokens.ne(self.padding_idx)
encoder_mask = segment_labels * padding_mask.type_as(segment_labels)
if sentence_rep == "meanpool":
ntokens = torch.sum(encoder_mask, dim=1, keepdim=True)
x = torch.sum(
encoder_out * encoder_mask.unsqueeze(2), dim=1, keepdim=True
) / ntokens.unsqueeze(2).type_as(encoder_out)
else: # 'maxpool'
encoder_out[
(encoder_mask == 0).unsqueeze(2).repeat(1, 1, encoder_out.shape[-1])
] = -float("inf")
x, _ = torch.max(encoder_out, dim=1, keepdim=True)
if hasattr(self, "transform_layer"):
x = self.transform_layer(x)
return x # B x 1 x C
def joint_forward(self, x):
# x: T x B x C
if self.joint_layer_norm:
x = self.joint_layer_norm(x.transpose(0, 1))
x = x.transpose(0, 1)
for layer in self.joint_layers:
x, _ = layer(x, self_attn_padding_mask=None)
return x
def classification_forward(self, x):
# x: B x T x C
return self.classifier(x)
@dataclass
class DiscriminativeNMTRerankerConfig(FairseqDataclass):
pretrained_model: str = field(
default="", metadata={"help": "pretrained model to load"}
)
sentence_rep: SENTENCE_REP_CHOICES = field(
default="head",
metadata={
"help": "method to transform the output of the transformer stack to a sentence-level representation"
},
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
classifier_dropout: float = field(
default=0.0, metadata={"help": "classifier dropout probability"}
)
embed_dim: int = field(default=768, metadata={"help": "embedding dimension"})
ffn_embed_dim: int = field(
default=2048, metadata={"help": "embedding dimension for FFN"}
)
encoder_layers: int = field(default=12, metadata={"help": "num encoder layers"})
attention_heads: int = field(default=8, metadata={"help": "num attention heads"})
encoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each encoder block"}
)
apply_bert_init: bool = field(
default=False, metadata={"help": "use custom param initialization for BERT"}
)
activation_fn: ACTIVATION_FN_CHOICES = field(
default="relu", metadata={"help": "activation function to use"}
)
freeze_embeddings: bool = field(
default=False, metadata={"help": "freeze embeddings in the pretrained model"}
)
n_trans_layers_to_freeze: int = field(
default=0,
metadata={
"help": "number of layers to freeze in the pretrained transformer model"
},
)
    # joint classification
joint_classification: JOINT_CLASSIFICATION_CHOICES = field(
default="none",
metadata={"help": "method to compute joint features for classification"},
)
num_joint_layers: int = field(
default=1, metadata={"help": "number of joint layers"}
)
joint_normalize_before: bool = field(
default=False,
metadata={"help": "apply layer norm on the input to the joint layer"},
)
@register_model(
"discriminative_nmt_reranker", dataclass=DiscriminativeNMTRerankerConfig
)
class DiscriminativeNMTReranker(BaseFairseqModel):
@classmethod
def build_model(cls, args, task):
model = BertRanker(args, task)
return DiscriminativeNMTReranker(args, model)
def __init__(self, args, model):
super().__init__()
self.model = model
self.sentence_rep = args.sentence_rep
self.joint_classification = args.joint_classification
def forward(self, src_tokens, src_lengths, **kwargs):
return self.model(src_tokens, src_lengths)
def sentence_forward(self, encoder_out, src_tokens):
return self.model.sentence_forward(encoder_out, src_tokens, self.sentence_rep)
def joint_forward(self, x):
return self.model.joint_forward(x)
def classification_forward(self, x):
return self.model.classification_forward(x)
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/models/discriminative_reranking_model.py |
#!/usr/bin/env python
import argparse
from multiprocessing import Pool
from pathlib import Path
import sacrebleu
import sentencepiece as spm
def read_text_file(filename):
with open(filename, "r") as f:
output = [line.strip() for line in f]
return output
def get_bleu(in_sent, target_sent):
bleu = sacrebleu.corpus_bleu([in_sent], [[target_sent]])
out = " ".join(
map(str, [bleu.score, bleu.sys_len, bleu.ref_len] + bleu.counts + bleu.totals)
)
return out
def get_ter(in_sent, target_sent):
ter = sacrebleu.corpus_ter([in_sent], [[target_sent]])
out = " ".join(map(str, [ter.score, ter.num_edits, ter.ref_length]))
return out
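def _demo_scores():
    # Illustrative sketch, not used by the pipeline below: shows the layout of the
    # per-hypothesis score strings that end up in the *.bleu / *.ter label files.
    hypo = "the cat sat on the mat"
    ref = "the cat sat on the mat"
    print(get_bleu(hypo, ref))  # "score sys_len ref_len" + 4 n-gram counts + 4 n-gram totals
    print(get_ter(hypo, ref))   # "score num_edits ref_length"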
def init(sp_model):
global sp
sp = spm.SentencePieceProcessor()
sp.Load(sp_model)
def process(source_sent, target_sent, hypo_sent, metric):
source_bpe = " ".join(sp.EncodeAsPieces(source_sent))
hypo_bpe = [" ".join(sp.EncodeAsPieces(h)) for h in hypo_sent]
if metric == "bleu":
score_str = [get_bleu(h, target_sent) for h in hypo_sent]
else: # ter
score_str = [get_ter(h, target_sent) for h in hypo_sent]
return source_bpe, hypo_bpe, score_str
def main(args):
assert (
args.split.startswith("train") or args.num_shards == 1
), "--num-shards should be set to 1 for valid and test sets"
assert (
args.split.startswith("train")
or args.split.startswith("valid")
or args.split.startswith("test")
), "--split should be set to train[n]/valid[n]/test[n]"
source_sents = read_text_file(args.input_source)
target_sents = read_text_file(args.input_target)
num_sents = len(source_sents)
assert num_sents == len(
target_sents
), f"{args.input_source} and {args.input_target} should have the same number of sentences."
hypo_sents = read_text_file(args.input_hypo)
assert (
len(hypo_sents) % args.beam == 0
), f"Number of hypotheses ({len(hypo_sents)}) cannot be divided by beam size ({args.beam})."
hypo_sents = [
hypo_sents[i : i + args.beam] for i in range(0, len(hypo_sents), args.beam)
]
assert num_sents == len(
hypo_sents
), f"{args.input_hypo} should contain {num_sents * args.beam} hypotheses but only has {len(hypo_sents) * args.beam}. (--beam={args.beam})"
output_dir = args.output_dir / args.metric
for ns in range(args.num_shards):
print(f"processing shard {ns+1}/{args.num_shards}")
shard_output_dir = output_dir / f"split{ns+1}"
source_output_dir = shard_output_dir / "input_src"
hypo_output_dir = shard_output_dir / "input_tgt"
metric_output_dir = shard_output_dir / args.metric
source_output_dir.mkdir(parents=True, exist_ok=True)
hypo_output_dir.mkdir(parents=True, exist_ok=True)
metric_output_dir.mkdir(parents=True, exist_ok=True)
if args.n_proc > 1:
with Pool(
args.n_proc, initializer=init, initargs=(args.sentencepiece_model,)
) as p:
output = p.starmap(
process,
[
(source_sents[i], target_sents[i], hypo_sents[i], args.metric)
for i in range(ns, num_sents, args.num_shards)
],
)
else:
init(args.sentencepiece_model)
output = [
process(source_sents[i], target_sents[i], hypo_sents[i], args.metric)
for i in range(ns, num_sents, args.num_shards)
]
with open(source_output_dir / f"{args.split}.bpe", "w") as s_o, open(
hypo_output_dir / f"{args.split}.bpe", "w"
) as h_o, open(metric_output_dir / f"{args.split}.{args.metric}", "w") as m_o:
for source_bpe, hypo_bpe, score_str in output:
assert len(hypo_bpe) == len(score_str)
for h, m in zip(hypo_bpe, score_str):
s_o.write(f"{source_bpe}\n")
h_o.write(f"{h}\n")
m_o.write(f"{m}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-source", type=Path, required=True)
parser.add_argument("--input-target", type=Path, required=True)
parser.add_argument("--input-hypo", type=Path, required=True)
parser.add_argument("--output-dir", type=Path, required=True)
parser.add_argument("--split", type=str, required=True)
parser.add_argument("--beam", type=int, required=True)
parser.add_argument("--sentencepiece-model", type=str, required=True)
parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu")
parser.add_argument("--num-shards", type=int, default=1)
parser.add_argument("--n-proc", type=int, default=8)
args = parser.parse_args()
main(args)
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/scripts/prep_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
_EPSILON = torch.finfo(torch.float32).eps
TARGET_DIST_NORM_CHOICES = ChoiceEnum(["none", "minmax"])
@dataclass
class KLDivergenceRerankingCriterionConfig(FairseqDataclass):
target_dist_norm: TARGET_DIST_NORM_CHOICES = field(
default="none",
metadata={"help": "method to normalize the range of target scores"},
)
temperature: float = field(
default=1.0,
metadata={"help": "temperature in softmax for target distributions"},
)
forward_batch_size: int = field(
default=32,
metadata={
"help": "number of hypotheses per batch for model forward (set a value smaller than --mt-beam to avoid OOM when training with a large beam size)"
},
)
@register_criterion(
"kl_divergence_rereanking", dataclass=KLDivergenceRerankingCriterionConfig
)
class KLDivergenceRerankingCriterion(FairseqCriterion):
def __init__(
self, task, target_dist_norm, temperature, forward_batch_size,
):
super().__init__(task)
self.target_dist_norm = target_dist_norm
self.temperature = temperature
self.forward_batch_size = forward_batch_size
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
sample_size = sample["id"].numel()
assert sample_size % self.task.cfg.mt_beam == 0, (
f"sample_size ({sample_size}) cannot be divided by beam size ({self.task.cfg.mt_beam})."
f"Please set --required-batch-size-multiple={self.task.cfg.mt_beam}."
)
# split into smaller batches for model forward
batch_out = []
for i in range(0, sample_size, self.forward_batch_size):
j = min(i + self.forward_batch_size, sample_size)
out = model(
src_tokens=sample["net_input"]["src_tokens"][i:j, :],
src_lengths=sample["net_input"]["src_lengths"][i:j],
)
batch_out.append(
model.sentence_forward(out, sample["net_input"]["src_tokens"][i:j, :])
)
batch_out = torch.cat(batch_out, dim=0).view(
self.task.cfg.mt_beam, sample_size // self.task.cfg.mt_beam, -1
) # T x B x C
if model.joint_classification == "sent":
batch_out = model.joint_forward(batch_out)
scores = model.classification_forward(batch_out.view(sample_size, 1, -1)).view(
-1, self.task.cfg.mt_beam
) # input: B x T x C
loss = self.compute_kl_loss(
scores, sample["target"][:, 0].view(-1, self.task.cfg.mt_beam)
)
sample_size = sample_size // self.task.cfg.mt_beam
logging_output = {
"loss": loss.detach(),
"ntokens": sample["ntokens"],
"nsentences": sample_size * self.task.cfg.mt_beam,
"sample_size": sample_size,
"scores": scores.detach(),
}
return loss, sample_size, logging_output
def compute_kl_loss(self, logits, target):
norm_target = target
if self.target_dist_norm == "minmax":
min_v = torch.min(target, 1, keepdim=True).values
max_v = torch.max(target, 1, keepdim=True).values
norm_target = (target - min_v) / (max_v - min_v + _EPSILON)
target_dist = F.softmax(
norm_target / self.temperature, dim=-1, dtype=torch.float32
)
model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32)
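        # The expression below equals sum(target_dist * (log(target_dist) - log(model_dist))),
        # i.e. KL(target_dist || model_dist) summed over the beam and the batch.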
loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum()
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
loss = loss_sum / sample_size / math.log(2)
metrics.log_scalar("loss", loss, sample_size, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True improves distributed training speed.
"""
return True
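def _demo_kl_loss():
    # Illustrative sketch, not used during training: reproduces the loss above on toy
    # numbers (3 hypotheses for one source sentence; scores and labels are made up).
    scores = torch.tensor([[2.0, 1.0, 0.5]])      # model scores, shape (1, mt_beam)
    target = torch.tensor([[30.0, 28.0, 10.0]])   # e.g. sentence-BLEU of each hypothesis
    target_dist = F.softmax(target, dim=-1)
    model_dist = F.log_softmax(scores, dim=-1)
    # identical to -(target_dist * model_dist - target_dist * target_dist.log()).sum()
    return (target_dist * (target_dist.log() - model_dist)).sum()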
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py |
from .discriminative_reranking_criterion import KLDivergenceRerankingCriterion
__all__ = [
"KLDivergenceRerankingCriterion",
]
| EXA-1-master | exa/libraries/fairseq/examples/discriminative_reranking_nmt/criterions/__init__.py |
import sys
from SPARQLWrapper import SPARQLWrapper, JSON
endpoint_url = "https://query.wikidata.org/sparql"
with open("/your/urls/here") as f:
data = f.readlines()
urls = [i.strip() for i in data]
def get_results(endpoint_url, URL):
query = f"""SELECT ?uriLabel ?occupation ?occupationLabel ?dob ?dobLabel WHERE {{
<{URL}> schema:about ?uri .
?uri wdt:P106 ?occupation .
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}"""
user_agent = "WDQS-example Python/%s.%s" % (sys.version_info[0], sys.version_info[1])
sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
return sparql.query().convert()
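# The SPARQL JSON results are expected to look roughly like (illustrative):
# {"results": {"bindings": [{"uriLabel": {"value": "Ada Lovelace"},
#                            "occupationLabel": {"value": "mathematician"}, ...}, ...]}}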
all_occupations = []
for URL in urls:
    results = get_results(endpoint_url, URL)
    occupations = []
    name = ""
    for result in results["results"]["bindings"]:
        name = result['uriLabel']['value']
        occupations.append(result['occupationLabel']['value'])
    # append exactly one line per URL so the length check below holds even when a
    # query returns no bindings
    all_occupations.append(name + ", " + ", ".join(occupations))
assert(len(all_occupations) == len(urls))
with open("/your/file/output/here", "w") as o:
for line in all_occupations:
o.write(line.strip() + "\n") | EXA-1-master | exa/libraries/fairseq/examples/womens_bios/query_occupations_from_wikidata.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import collections
import argparse
import shutil
import subprocess
import sys
import tempfile
from multiprocessing import Pool
import sentencepiece as spm
def preprocess(spm_model_path, train_path, valid_path, test_path, dest_dir, remove_empty=False, output_format='piece', workers=20):
with tempfile.TemporaryDirectory() as tmp:
# Tokenize with SentencePiece
for split, path in ('train', train_path), ('valid', valid_path), ('test', test_path):
if path is None:
continue
if path == '-':
path = sys.stdin.fileno()
with open(path, encoding='utf-8', errors='surrogateescape') as fin:
with open(f'{tmp}/{split}', mode='w', encoding='utf-8', errors='surrogateescape') as fout:
encoder = MultiprocessingEncoder(model=spm_model_path, remove_empty=remove_empty, output_format=output_format)
pool = Pool(workers, initializer=encoder.initializer)
encoded_lines = pool.imap(encoder.encode, fin, 10000)
for i, line in enumerate(encoded_lines, start=1):
if line is not None:
print(line, file=fout)
if i % 10000 == 0:
print("tokenized {} lines".format(i), file=sys.stderr)
# Generate dictionary
sp = spm.SentencePieceProcessor(model_file=spm_model_path)
if output_format == 'piece':
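            # ids 0-2 are SentencePiece's default <unk>/<s>/</s>, which fairseq's
            # Dictionary defines on its own (assumes the default special-token ids)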
vocab = [sp.id_to_piece(i) for i in range(3, sp.vocab_size())]
else:
vocab = map(str, range(sp.vocab_size()))
with open(f'{tmp}/dict.txt', mode='w', encoding='utf-8', errors='surrogateescape') as f:
for word in vocab:
print(word, 1, file=f)
# Binarize
command = [
'python3', '-m', 'fairseq_cli.preprocess',
'--only-source',
'--thresholdsrc', '0',
'--destdir', dest_dir,
'--srcdict', f'{tmp}/dict.txt',
'--workers', '20',
]
for split, path in ('train', train_path), ('valid', valid_path), ('test', test_path):
if path is not None:
command += [f'--{split}pref', f'{tmp}/{split}']
subprocess.run(command)
# Copy SentencePiece model
shutil.copyfile(spm_model_path, f'{dest_dir}/sentencepiece.bpe.model')
class MultiprocessingEncoder(object):
def __init__(self, model, remove_empty, output_format):
self.model = model
self.remove_empty = remove_empty
self.output_format = output_format
def initializer(self):
global sp
sp = spm.SentencePieceProcessor(model_file=self.model)
def encode(self, line):
global sp
line = line.strip()
if len(line) == 0 and self.remove_empty:
return None
if self.output_format == 'piece':
return ' '.join(sp.encode_as_pieces(line))
else:
return ' '.join(map(str, sp.encode(line)))
def write_lines(lines, path):
with open(path, mode='x', encoding='utf-8') as f:
for line in lines:
print(line, file=f)
def read_jsonl(path):
with open(path, encoding='utf-8') as f:
return [json.loads(line) for line in f.read().splitlines()]
def read_nli(path, langs=None):
data = read_jsonl(path)
if langs is not None:
data = [sample for sample in data if sample.get('language') in langs]
lang2count = collections.defaultdict(int)
for sample in data:
lang2count[sample.get('language')] += 1
if langs:
assert set(lang2count.keys()) == set(langs)
nlangs = len(lang2count)
assert nlangs > 0
lens = list(lang2count.values())
assert all([lens[0] == length for length in lens])
    print(f'Loaded {lens[0]} samples per language ({nlangs} languages) from {path}', file=sys.stderr)
return data
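# Each jsonl line is expected to look roughly like (illustrative; extra fields are ignored):
# {"sentence1": "...", "sentence2": "...", "gold_label": "entailment", "language": "en"}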
def main():
parser = argparse.ArgumentParser(description='Tokenize and binarize NLI data')
parser.add_argument('--sentencepiece-model', required=True)
parser.add_argument('--train', required=True, help='Training data in jsonl format')
parser.add_argument('--valid', required=True, help='Validation data in jsonl format')
parser.add_argument('--destdir', required=True)
args = parser.parse_args()
    os.makedirs(args.destdir + '/raw')
    os.makedirs(args.destdir + '/bin')
# Extract input/labels
for split, path in ('train', args.train), ('valid', args.valid):
data = read_nli(path, langs=None)
original_size = len(data)
data = [sample for sample in data if sample['gold_label'] != '-']
assert all(sample['gold_label'] in ('contradiction', 'entailment', 'neutral') for sample in data)
filtered_size = len(data)
if filtered_size != original_size:
print(f'Filtered {filtered_size}/{original_size} samples from {path}', file=sys.stderr)
for name, field in ('input0', 'sentence1'), ('input1', 'sentence2'), ('label', 'gold_label'):
write_lines([sample[field] for sample in data], f'{args.destdir}/raw/{split}.{name}.txt')
# Tokenize and binarize input
for field in 'input0', 'input1':
preprocess(
spm_model_path=args.sentencepiece_model,
train_path=f'{args.destdir}/raw/train.{field}.txt',
valid_path=f'{args.destdir}/raw/valid.{field}.txt',
test_path=None,
dest_dir=f'{args.destdir}/bin/{field}',
workers=20,
)
# Binarize labels
subprocess.run([
'python3', '-m', 'fairseq_cli.preprocess',
'--trainpref', f'{args.destdir}/raw/train.label.txt',
'--validpref', f'{args.destdir}/raw/valid.label.txt',
'--only-source',
'--thresholdsrc', '0',
'--destdir', f'{args.destdir}/bin/label',
'--workers', '20',
])
if __name__ == '__main__':
main()
| EXA-1-master | exa/libraries/fairseq/examples/xmod/preprocess_nli.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
from fairseq.utils import safe_hasattr
def get_avg_pool(
models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False
):
model = EnsembleModel(models)
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32)
encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype(
np.float32
)
encoder_mask = np.expand_dims(encoder_mask.T, axis=2)
if has_langtok:
encoder_mask = encoder_mask[1:, :, :]
np_encoder_outs = np_encoder_outs[1, :, :]
masked_encoder_outs = encoder_mask * np_encoder_outs
avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0)
return avg_pool
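def _demo_masked_mean_pool():
    # Illustrative sketch, not used by main(): the pooling above is a padding-aware mean
    # over time. Toy shapes: T=3 encoder steps, B=2 sentences, C=4 dimensions.
    outs = np.random.rand(3, 2, 4).astype(np.float32)            # T x B x C encoder states
    mask = np.array([[1, 1], [1, 1], [1, 0]], dtype=np.float32)  # second sentence has 2 real steps
    mask = np.expand_dims(mask, axis=2)                          # T x B x 1
    pooled = (outs * mask / mask.sum(axis=0)).sum(axis=0)        # B x C, mean over unpadded steps
    assert pooled.shape == (2, 4)
    return pooled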
def main(args):
assert args.path is not None, "--path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
args.beam = 1
utils.import_user_module(args)
if args.max_tokens is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
print("| loading model(s) from {}".format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(":"),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_positions=utils.resolve_max_positions(
task.max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
num_sentences = 0
source_sentences = []
shard_id = 0
all_avg_pool = None
encoder_has_langtok = (
safe_hasattr(task.args, "encoder_langtok")
and task.args.encoder_langtok is not None
and safe_hasattr(task.args, "lang_tok_replacing_bos_eos")
and not task.args.lang_tok_replacing_bos_eos
)
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
if sample is None:
print("Skipping None")
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
with torch.no_grad():
avg_pool = get_avg_pool(
models,
sample,
prefix_tokens,
src_dict,
args.post_process,
has_langtok=encoder_has_langtok,
)
if all_avg_pool is not None:
all_avg_pool = np.concatenate((all_avg_pool, avg_pool))
else:
all_avg_pool = avg_pool
if not isinstance(sample["id"], list):
sample_ids = sample["id"].tolist()
else:
sample_ids = sample["id"]
for i, sample_id in enumerate(sample_ids):
# Remove padding
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.post_process)
else:
src_str = ""
if not args.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str))
source_sentences.append(f"{sample_id}\t{src_str}")
num_sentences += sample["nsentences"]
if all_avg_pool.shape[0] >= 1000000:
with open(
f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}",
"w",
) as avg_pool_file:
all_avg_pool.tofile(avg_pool_file)
with open(
f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}",
"w",
) as sentence_file:
sentence_file.writelines(f"{line}\n" for line in source_sentences)
all_avg_pool = None
source_sentences = []
shard_id += 1
if all_avg_pool is not None:
with open(
f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w"
) as avg_pool_file:
all_avg_pool.tofile(avg_pool_file)
with open(
f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w"
) as sentence_file:
sentence_file.writelines(f"{line}\n" for line in source_sentences)
return None
def cli_main():
parser = options.get_generation_parser()
parser.add_argument(
"--encoder-save-dir",
default="",
type=str,
metavar="N",
help="directory to save encoder outputs",
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/fairseq/examples/criss/save_encoder.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
from subprocess import check_call
try:
import faiss
has_faiss = True
except ImportError:
has_faiss = False
import numpy as np
GB = 1024 * 1024 * 1024
def call(cmd):
print(cmd)
check_call(cmd, shell=True)
def get_batches(directory, lang, prefix="all_avg_pool"):
print(f"Finding in {directory}/{prefix}.{lang}*")
files = glob.glob(f"{directory}/{prefix}.{lang}*")
emb_files = []
txt_files = []
for emb_fi in files:
emb_files.append(emb_fi)
txt_fi = emb_fi.replace(prefix, "sentences")
txt_files.append(txt_fi)
return emb_files, txt_files
def load_batch(emb_file, dim):
embeddings = np.fromfile(emb_file, dtype=np.float32)
num_rows = int(embeddings.shape[0] / dim)
embeddings = embeddings.reshape((num_rows, dim))
faiss.normalize_L2(embeddings)
return embeddings
def knnGPU_sharded(x_batches_f, y_batches_f, dim, k, direction="x2y"):
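    # Both sides are processed in shards so only one (x_batch, y_batch) pair of embedding
    # files is in memory at a time; per-shard top-k neighbours are concatenated and then
    # re-ranked to recover the global top-k for every x row.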
if not has_faiss:
raise ImportError("Please install Faiss")
sims = []
inds = []
xfrom = 0
xto = 0
for x_batch_f in x_batches_f:
yfrom = 0
yto = 0
x_batch = load_batch(x_batch_f, dim)
xto = xfrom + x_batch.shape[0]
bsims, binds = [], []
for y_batch_f in y_batches_f:
y_batch = load_batch(y_batch_f, dim)
neighbor_size = min(k, y_batch.shape[0])
yto = yfrom + y_batch.shape[0]
print("{}-{} -> {}-{}".format(xfrom, xto, yfrom, yto))
idx = faiss.IndexFlatIP(dim)
idx = faiss.index_cpu_to_all_gpus(idx)
idx.add(y_batch)
bsim, bind = idx.search(x_batch, neighbor_size)
bsims.append(bsim)
binds.append(bind + yfrom)
yfrom += y_batch.shape[0]
del idx
del y_batch
bsims = np.concatenate(bsims, axis=1)
binds = np.concatenate(binds, axis=1)
aux = np.argsort(-bsims, axis=1)
sim_batch = np.zeros((x_batch.shape[0], k), dtype=np.float32)
ind_batch = np.zeros((x_batch.shape[0], k), dtype=np.int64)
for i in range(x_batch.shape[0]):
for j in range(k):
sim_batch[i, j] = bsims[i, aux[i, j]]
ind_batch[i, j] = binds[i, aux[i, j]]
sims.append(sim_batch)
inds.append(ind_batch)
xfrom += x_batch.shape[0]
del x_batch
sim = np.concatenate(sims, axis=0)
ind = np.concatenate(inds, axis=0)
return sim, ind
def score(sim, fwd_mean, bwd_mean, margin):
return margin(sim, (fwd_mean + bwd_mean) / 2)
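# With margin = a / b (set in __main__ below), this is the ratio-margin criterion used in
# margin-based bitext mining (cf. Artetxe & Schwenk, 2019): a candidate pair's similarity is
# divided by the mean similarity of each sentence to its nearest neighbours, so scores
# noticeably above 1 mark pairs that stand out from their neighbourhood.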
def score_candidates(
sim_mat, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False
):
print(" - scoring {:d} candidates".format(sim_mat.shape[0]))
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = int(candidate_inds[i, j])
scores[i, j] = score(sim_mat[i, j], fwd_mean[i], bwd_mean[k], margin)
return scores
def load_text(files):
all_sentences = []
for fi in files:
with open(fi) as sentence_fi:
for line in sentence_fi:
all_sentences.append(line.strip())
print(f"Read {len(all_sentences)} sentences")
return all_sentences
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Mine bitext")
parser.add_argument("--src-lang", help="Source language")
parser.add_argument("--tgt-lang", help="Target language")
parser.add_argument(
"--dict-path", help="Path to dictionary file", default="dict.txt"
)
parser.add_argument(
"--spm-path", help="Path to SPM model file", default="sentence.bpe.model"
)
parser.add_argument("--dim", type=int, default=1024, help="Embedding dimension")
parser.add_argument("--mem", type=int, default=5, help="Memory in GB")
parser.add_argument("--src-dir", help="Source directory")
parser.add_argument("--tgt-dir", help="Target directory")
parser.add_argument("--output", help="Output path")
parser.add_argument(
"--neighborhood", type=int, default=4, help="Embedding dimension"
)
parser.add_argument(
"--threshold", type=float, default=1.06, help="Threshold on mined bitext"
)
parser.add_argument(
"--valid-size",
type=int,
default=2000,
help="Number of sentences used for validation set",
)
parser.add_argument(
"--min-count",
type=int,
default=50000,
help="Min num sentences used for each language",
)
args = parser.parse_args()
x_batches_f, x_sents_f = get_batches(args.src_dir, args.src_lang)
y_batches_f, y_sents_f = get_batches(args.tgt_dir, args.tgt_lang)
margin = lambda a, b: a / b
y2x_sim, y2x_ind = knnGPU_sharded(
y_batches_f, x_batches_f, args.dim, args.neighborhood, direction="y2x"
)
x2y_sim, x2y_ind = knnGPU_sharded(
x_batches_f, y_batches_f, args.dim, args.neighborhood, direction="x2y"
)
x2y_mean = x2y_sim.mean(axis=1)
y2x_mean = y2x_sim.mean(axis=1)
fwd_scores = score_candidates(x2y_sim, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y2x_sim, y2x_ind, y2x_mean, x2y_mean, margin)
fwd_best = x2y_ind[np.arange(x2y_sim.shape[0]), fwd_scores.argmax(axis=1)]
bwd_best = y2x_ind[np.arange(y2x_sim.shape[0]), bwd_scores.argmax(axis=1)]
indices = np.stack(
(
np.concatenate((np.arange(x2y_ind.shape[0]), bwd_best)),
np.concatenate((fwd_best, np.arange(y2x_ind.shape[0]))),
),
axis=1,
)
scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1)))
x_sentences = load_text(x_sents_f)
y_sentences = load_text(y_sents_f)
threshold = args.threshold
min_count = args.min_count
seen_src, seen_trg = set(), set()
directory = args.output
call(f"mkdir -p {directory}")
src_out = open(
f"{directory}/all.{args.src_lang}",
mode="w",
encoding="utf-8",
errors="surrogateescape",
)
tgt_out = open(
f"{directory}/all.{args.tgt_lang}",
mode="w",
encoding="utf-8",
errors="surrogateescape",
)
scores_out = open(
f"{directory}/all.scores", mode="w", encoding="utf-8", errors="surrogateescape"
)
count = 0
for i in np.argsort(-scores):
src_ind, trg_ind = indices[i]
if src_ind not in seen_src and trg_ind not in seen_trg:
seen_src.add(src_ind)
seen_trg.add(trg_ind)
if scores[i] > threshold or count < min_count:
if x_sentences[src_ind]:
print(scores[i], file=scores_out)
print(x_sentences[src_ind], file=src_out)
print(y_sentences[trg_ind], file=tgt_out)
count += 1
else:
print(f"Ignoring sentence: {x_sentences[src_ind]}")
src_out.close()
tgt_out.close()
scores_out.close()
print(f"Found {count} pairs for threshold={threshold}")
with open(f"{directory}/all.{args.src_lang}") as all_s, open(
f"{directory}/all.{args.tgt_lang}"
) as all_t, open(f"{directory}/valid.{args.src_lang}", "w") as valid_s, open(
f"{directory}/valid.{args.tgt_lang}", "w"
) as valid_t, open(
f"{directory}/train.{args.src_lang}", "w"
) as train_s, open(
f"{directory}/train.{args.tgt_lang}", "w"
) as train_t:
count = 0
for s_line, t_line in zip(all_s, all_t):
s_line = s_line.split("\t")[1]
t_line = t_line.split("\t")[1]
if count >= args.valid_size:
train_s.write(s_line)
train_t.write(t_line)
else:
valid_s.write(s_line)
valid_t.write(t_line)
count += 1
| EXA-1-master | exa/libraries/fairseq/examples/criss/mining/mine.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import numpy as np
DIM = 1024
def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
target_ids = [tid for tid in target_embs]
source_mat = np.stack(source_embs.values(), axis=0)
normalized_source_mat = source_mat / np.linalg.norm(
source_mat, axis=1, keepdims=True
)
target_mat = np.stack(target_embs.values(), axis=0)
normalized_target_mat = target_mat / np.linalg.norm(
target_mat, axis=1, keepdims=True
)
sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
if return_sim_mat:
return sim_mat
neighbors_map = {}
for i, sentence_id in enumerate(source_embs):
idx = np.argsort(sim_mat[i, :])[::-1][:k]
neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
return neighbors_map
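def _demo_compute_dist():
    # Illustrative sketch, not used by compute_accuracy(): two toy "languages" whose
    # matching sentences point in similar directions, so each id retrieves itself.
    src = {"a": np.array([1.0, 0.0]), "b": np.array([0.0, 1.0])}
    tgt = {"a": np.array([2.0, 0.1]), "b": np.array([0.1, 2.0])}
    return compute_dist(src, tgt, k=1)  # {"a": ["a"], "b": ["b"]}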
def load_embeddings(directory, LANGS):
sentence_embeddings = {}
sentence_texts = {}
for lang in LANGS:
sentence_embeddings[lang] = {}
sentence_texts[lang] = {}
lang_dir = f"{directory}/{lang}"
embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
for embed_file in embedding_files:
shard_id = embed_file.split(".")[-1]
embeddings = np.fromfile(embed_file, dtype=np.float32)
num_rows = embeddings.shape[0] // DIM
embeddings = embeddings.reshape((num_rows, DIM))
with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
for idx, line in enumerate(sentence_file):
sentence_id, sentence = line.strip().split("\t")
sentence_texts[lang][sentence_id] = sentence
sentence_embeddings[lang][sentence_id] = embeddings[idx, :]
return sentence_embeddings, sentence_texts
def compute_accuracy(directory, LANGS):
sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)
top_1_accuracy = {}
top1_str = " ".join(LANGS) + "\n"
for source_lang in LANGS:
top_1_accuracy[source_lang] = {}
top1_str += f"{source_lang} "
for target_lang in LANGS:
top1 = 0
top5 = 0
neighbors_map = compute_dist(
sentence_embeddings[source_lang], sentence_embeddings[target_lang]
)
for sentence_id, neighbors in neighbors_map.items():
if sentence_id == neighbors[0]:
top1 += 1
if sentence_id in neighbors[:5]:
top5 += 1
n = len(sentence_embeddings[target_lang])
top1_str += f"{top1/n} "
top1_str += "\n"
print(top1_str)
print(top1_str, file=open(f"{directory}/accuracy", "w"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Analyze encoder outputs")
parser.add_argument("directory", help="Source language corpus")
parser.add_argument("--langs", help="List of langs")
args = parser.parse_args()
langs = args.langs.split(",")
compute_accuracy(args.directory, langs)
| EXA-1-master | exa/libraries/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.search import Search
class NoisyChannelBeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.fw_scores_buf = None
self.lm_scores_buf = None
def _init_buffers(self, t):
# super()._init_buffers(t)
if self.fw_scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
self.beams_buf = torch.LongTensor().to(device=t.device)
self.fw_scores_buf = t.new()
self.lm_scores_buf = t.new()
def combine_fw_bw(self, combine_method, fw_cum, bw, step):
if combine_method == "noisy_channel":
fw_norm = fw_cum.div(step + 1)
lprobs = bw + fw_norm
elif combine_method == "lm_only":
lprobs = bw + fw_cum
return lprobs
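    # In "noisy_channel" mode the cumulative forward (direct-model) score is length-normalised
    # by the number of generated tokens before being added to the backward score, keeping the
    # two terms on a comparable scale; "lm_only" adds the un-normalised cumulative score.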
def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method):
self._init_buffers(fw_lprobs)
bsz, beam_size, vocab_size = fw_lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous()
bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous()
# nothing to add since we are at the first step
fw_lprobs_cum = fw_lprobs
else:
# make probs contain cumulative scores for each hypothesis
raw_scores = (scores[:, :, step - 1].unsqueeze(-1))
fw_lprobs_cum = (fw_lprobs.add(raw_scores))
combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step)
# choose the top k according to the combined noisy channel model score
torch.topk(
combined_lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
combined_lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
# save corresponding fw and lm scores
self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf)
self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf)
# Project back into relative indices and beams
self.beams_buf = self.indices_buf // vocab_size
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
| EXA-1-master | exa/libraries/fairseq/examples/fast_noisy_channel/noisy_channel_beam_search.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import noisy_channel_translation # noqa
from . import noisy_channel_sequence_generator # noqa
from . import noisy_channel_beam_search # noqa
| EXA-1-master | exa/libraries/fairseq/examples/fast_noisy_channel/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from .noisy_channel_beam_search import NoisyChannelBeamSearch
from fairseq.sequence_generator import EnsembleModel
class NoisyChannelSequenceGenerator(object):
def __init__(
self,
combine_method,
tgt_dict,
src_dict=None,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
len_penalty=1.0,
unk_penalty=0.0,
retain_dropout=False,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
normalize_scores=True,
channel_models=None,
k2=10,
ch_weight=1.0,
channel_scoring_type='log_norm',
top_k_vocab=0,
lm_models=None,
lm_dict=None,
lm_weight=1.0,
normalize_lm_scores_by_tgt_len=False,
):
"""Generates translations of a given source sentence,
using beam search with noisy channel decoding.
Args:
combine_method (string, optional): Method to combine direct, LM and
channel model scores (default: None)
tgt_dict (~fairseq.data.Dictionary): target dictionary
src_dict (~fairseq.data.Dictionary): source dictionary
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
retain_dropout (bool, optional): use dropout when generating
(default: False)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
no_repeat_ngram_size (int, optional): Size of n-grams that we avoid
repeating in the generation (default: 0)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
channel_models (List[~fairseq.models.FairseqModel]): ensemble of models
translating from the target to the source
k2 (int, optional): Top K2 candidates to score per beam at each step (default:10)
ch_weight (int, optional): Weight associated with the channel model score
assuming that the direct model score has weight 1.0 (default: 1.0)
channel_scoring_type (str, optional): String specifying how to score
the channel model (default: 'log_norm')
top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or
`'src_vocab_batched'`, then this parameter specifies the number of
most frequent tokens to include in the channel model output vocabulary,
in addition to the source tokens in the input batch (default: 0)
lm_models (List[~fairseq.models.FairseqModel]): ensemble of models
generating text in the target language
lm_dict (~fairseq.data.Dictionary): LM Model dictionary
            lm_weight (float, optional): Weight associated with the LM model score
                assuming that the direct model score has weight 1.0 (default: 1.0)
normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores
by the target length? By default, we normalize the combination of
LM and channel model scores by the source length
"""
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
self.channel_models = channel_models
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.combine_method = combine_method
self.k2 = k2
self.ch_weight = ch_weight
self.channel_scoring_type = channel_scoring_type
self.top_k_vocab = top_k_vocab
self.lm_models = lm_models
self.lm_dict = lm_dict
self.lm_weight = lm_weight
self.log_softmax_fn = torch.nn.LogSoftmax(dim=1)
self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len
self.share_tgt_dict = (self.lm_dict == self.tgt_dict)
self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict)
self.ch_scoring_bsz = 3072
assert temperature > 0, '--temperature must be greater than 0'
self.search = NoisyChannelBeamSearch(tgt_dict)
@torch.no_grad()
def generate(
self,
models,
sample,
prefix_tokens=None,
bos_token=None,
**kwargs
):
"""Generate a batch of translations.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
"""
model = EnsembleModel(models)
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(model.models_size)
],
)
if not self.retain_dropout:
model.eval()
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
# batch dimension goes first followed by source lengths
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if self.match_source_len:
max_len = src_lengths_no_eos.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
model.max_decoder_positions() - 1,
)
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
src_lengths = encoder_input['src_lengths']
# initialize buffers
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
# reorder source tokens so they may be used as a reference in generating P(S|T)
src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index)
src_tokens = src_tokens.repeat(1, beam_size).view(-1, src_len)
src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1)
attn, attn_buf = None, None
nonpad_idxs = None
# The cands_to_ignore indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then the cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
fw scores for each hypothesis
combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing
combined noisy channel scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty
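            # cum_unfin[i] counts how many sentences ordered before the i-th unfinished
            # sentence have already finished, so that unfin_idx + cum_unfin[unfin_idx]
            # recovers the index of the sentence in the original (unshrunk) batch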
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
if self.match_source_len and step > src_lengths_no_eos[unfin_idx]:
score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i][nonpad_idxs[sent]]
_, alignment = hypo_attn.max(dim=0)
else:
hypo_attn = None
alignment = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': alignment,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k):
"""Rescore the top k hypothesis from each beam using noisy channel modeling
Returns:
new_fw_lprobs: the direct model probabilities after pruning the top k
new_ch_lm_lprobs: the combined channel and language model probabilities
new_lm_lprobs: the language model probabilities after pruning the top k
"""
with torch.no_grad():
lprobs_size = lprobs.size()
if prefix_tokens is not None and step < prefix_tokens.size(1):
probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :]
cand_scores = torch.gather(
probs_slice, dim=1,
index=prefix_tokens[:, step].view(-1, 1).data
).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1)
cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1)
# need to calculate and save fw and lm probs for prefix tokens
fw_top_k = cand_scores
fw_top_k_idx = cand_indices
k = 1
else:
# take the top k best words for every sentence in batch*beam
fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k)
eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0]
ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0)
src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype)
if self.combine_method != "lm_only":
temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1)
not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index
cur_tgt_size = step+2
# add eos to all candidate sentences except those that already end in eos
eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1)
eos_tokens[eos_idx] = self.tgt_dict.pad_index
if step == 0:
channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1)
else:
# move eos from beginning to end of target sentence
channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1)
ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size))
ch_input_lengths[eos_idx] = cur_tgt_size-1
if self.channel_scoring_type == "unnormalized":
ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths)
ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True)
del ch_encoder_output
ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:])
ch_intermed_scores = ch_intermed_scores.float()
ch_intermed_scores *= not_padding.float()
ch_scores = torch.sum(ch_intermed_scores, dim=1)
elif self.channel_scoring_type == "k2_separate":
for k_idx in range(k):
k_eos_tokens = eos_tokens[k_idx::k, :]
if step == 0:
k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1)
else:
# move eos from beginning to end of target sentence
k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1)
k_ch_input_lengths = ch_input_lengths[k_idx::k]
k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens)
k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True)
k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2)
k_ch_intermed_scores *= not_padding.float()
ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1)
elif self.channel_scoring_type == "src_vocab":
ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths)
ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True)
del ch_encoder_output
ch_lprobs = normalized_scores_with_batch_vocab(
channel_model.decoder,
ch_decoder_output, src_tokens, k, bsz, beam_size,
self.src_dict.pad_index, top_k=self.top_k_vocab)
ch_scores = torch.sum(ch_lprobs, dim=1)
elif self.channel_scoring_type == "src_vocab_batched":
ch_bsz_size = temp_src_tokens_full.shape[0]
ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz))
for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)):
end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size)
temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :]
channel_input_batch = channel_input[start_idx:end_idx, :]
ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx]
ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch)
ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, encoder_out=ch_encoder_output_batch, features_only=True)
ch_lprobs_list[i] = normalized_scores_with_batch_vocab(
channel_model.decoder,
ch_decoder_output_batch, src_tokens, k, bsz, beam_size,
self.src_dict.pad_index, top_k=self.top_k_vocab,
start_idx=start_idx, end_idx=end_idx)
ch_lprobs = torch.cat(ch_lprobs_list, dim=0)
ch_scores = torch.sum(ch_lprobs, dim=1)
else:
ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full)
ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True)
ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1)
ch_intermed_scores *= not_padding.float()
ch_scores = torch.sum(ch_intermed_scores, dim=1)
else:
cur_tgt_size = 0
ch_scores = ch_scores.view(bsz*beam_size, k)
expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten()
if self.share_tgt_dict:
lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k)
else:
new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm)
new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm)
lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k)
lm_scores.add_(expanded_lm_prefix_scores)
ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size)
# initialize all as min value
new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_fw_lprobs[:, self.pad] = -math.inf
new_ch_lm_lprobs[:, self.pad] = -math.inf
new_lm_lprobs[:, self.pad] = -math.inf
new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k)
new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores)
new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k))
return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs
def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size):
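            # Combines the channel model score with the LM score into the "noisy channel"
            # term that the beam search later mixes with the direct model score; src_size
            # and tgt_size drive the length normalizations described in the comments below.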
if self.channel_scoring_type == "unnormalized":
ch_scores = self.log_softmax_fn(
ch_scores.view(-1, self.beam_size * self.k2)
).view(ch_scores.shape)
ch_scores = ch_scores * self.ch_weight
lm_scores1 = lm_scores1 * self.lm_weight
if combine_type == "lm_only":
# log P(T|S) + log P(T)
ch_scores = lm_scores1.view(ch_scores.size())
elif combine_type == "noisy_channel":
# 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T)
if self.normalize_lm_scores_by_tgt_len:
ch_scores.div_(src_size)
lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size)
ch_scores.add_(lm_scores_norm)
# 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T)
else:
ch_scores.add_(lm_scores1.view(ch_scores.size()))
ch_scores.div_(src_size)
return ch_scores
if self.channel_models is not None:
channel_model = self.channel_models[0] # assume only one channel_model model
else:
channel_model = None
lm = EnsembleModel(self.lm_models)
lm_incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(lm.models_size)
],
)
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lm.reorder_incremental_state(lm_incremental_states, reorder_state)
fw_lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature,
)
fw_lprobs[:, self.pad] = -math.inf # never select pad
fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2)
# handle min and max length constraints
if step >= max_len:
fw_lprobs[:, :self.eos] = -math.inf
fw_lprobs[:, self.eos + 1:] = -math.inf
elif step < self.min_len:
fw_lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
if prefix_tokens is not None and step < prefix_tokens.size(1):
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_mask = prefix_toks.ne(self.pad)
prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
fw_lprobs[prefix_mask] = -math.inf
fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs
)
prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
ch_lm_lprobs[prefix_mask] = -math.inf
ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs
)
prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
lm_lprobs[prefix_mask] = -math.inf
lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim)
ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim)
lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
nonpad_idxs = src_tokens.ne(self.pad)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(fw_lprobs)
scores_buf = scores_buf.type_as(fw_lprobs)
self.search.set_src_lengths(src_lengths_no_eos)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step(
step,
fw_lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size),
lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for candidates to be ignored)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size] &= ~cands_to_ignore
# only consider eos when it's among the top beam_size indices
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size]
)
combined_noisy_channel_eos_scores = torch.masked_select(
combined_noisy_channel_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
)
# finalize hypo using channel model score
finalized_sents = finalize_hypos(
step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = torch.nonzero(batch_mask).squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs]
fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths_no_eos = src_lengths_no_eos[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze()
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# ignored hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
eos_mask[:, :beam_size] |= cands_to_ignore
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_cands_to_ignore, active_hypos)
)
# update cands_to_ignore to ignore any finalized hypos
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
assert (~cands_to_ignore).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
fw_lprobs_top_k, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
fw_lprobs_top_k, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
torch.gather(
lm_lprobs_top_k, dim=1, index=active_hypos,
out=lm_prefix_scores.view(bsz, beam_size)
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k):
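    # Score the k candidate next tokens for every beam under the ensembled LM:
    # run the LM decoder on the current prefixes, then gather the log-probability
    # of each candidate token, returning one flattened score per (beam, candidate) pair.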
with torch.no_grad():
lm_lprobs, avg_attn_scores = model.forward_decoder(
input_tokens, encoder_outs=None, incremental_states=incremental_states,
)
lm_lprobs_size = lm_lprobs.size(0)
probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1)
return probs_next_wrd
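# Helpers for mapping token ids between the translation target dictionary and the
# LM dictionary when the two vocabularies differ; symbols are matched by string, and
# the remapping runs on CPU (see the .cpu() round-trip in dict2dict) since Tensor.map_
# only supports CPU tensors.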
def make_dict2dict(old_dict, new_dict):
dict2dict_map = {}
for sym in old_dict.symbols:
dict2dict_map[old_dict.index(sym)] = new_dict.index(sym)
return dict2dict_map
def dict2dict(tokens, dict2dict_map):
if tokens.device == torch.device('cpu'):
tokens_tmp = tokens
else:
tokens_tmp = tokens.cpu()
return tokens_tmp.map_(
tokens_tmp,
lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)]
).to(tokens.device)
def reorder_tokens(tokens, lengths, eos):
# reorder source tokens so they may be used as reference for P(S|T)
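    # e.g. [<pad> <pad> <w1> <w2> <eos>] with length 3 becomes [<eos> <w1> <w2> <pad> <pad>]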
return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0)
def reorder_all_tokens(tokens, lengths, eos):
# used to reorder src tokens from [<pad> <w1> <w2> .. <eos>] to [<eos> <w1> <w2>...<pad>]
# so source tokens can be used to predict P(S|T)
return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)])
def normalized_scores_with_batch_vocab(
model_decoder, features, target_ids, k, bsz, beam_size,
pad_idx, top_k=0, vocab_size_meter=None, start_idx=None,
end_idx=None, **kwargs):
"""
Get normalized probabilities (or log probs) from a net's output
w.r.t. vocab consisting of target IDs in the batch
"""
if model_decoder.adaptive_softmax is None:
weight = model_decoder.output_projection.weight
vocab_ids = torch.unique(
torch.cat(
(torch.unique(target_ids), torch.arange(top_k, device=target_ids.device))
)
)
id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids))))
mapped_target_ids = target_ids.cpu().apply_(
lambda x, id_map=id_map: id_map[x]
).to(target_ids.device)
expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1)
if start_idx is not None and end_idx is not None:
expanded_target_ids = expanded_target_ids[start_idx:end_idx, :]
logits = F.linear(features, weight[vocab_ids, :])
log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32)
intermed_scores = torch.gather(
log_softmax[:, :-1, :],
2,
expanded_target_ids[:, 1:].unsqueeze(2),
).squeeze()
not_padding = expanded_target_ids[:, 1:] != pad_idx
intermed_scores *= not_padding.float()
return intermed_scores
else:
raise ValueError("adaptive softmax doesn't work with " +
"`normalized_scores_with_batch_vocab()`")
| EXA-1-master | exa/libraries/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.tasks.translation import TranslationTask
from fairseq.tasks.language_modeling import LanguageModelingTask
from fairseq import checkpoint_utils
import argparse
from fairseq.tasks import register_task
import torch
@register_task("noisy_channel_translation")
class NoisyChannelTranslation(TranslationTask):
"""
Rescore the top k candidates from each beam using noisy channel modeling
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
TranslationTask.add_args(parser)
# fmt: off
parser.add_argument('--channel-model', metavar='FILE',
help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.')
parser.add_argument('--combine-method', default='lm_only',
choices=['lm_only', 'noisy_channel'],
help="""method for combining direct and channel model scores.
lm_only: decode with P(T|S)P(T)
noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""")
parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False,
help='normalize lm score by target length instead of source length')
parser.add_argument('--channel-scoring-type', default='log_norm', choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'],
help="Normalize bw scores with log softmax or return bw scores without log softmax")
parser.add_argument('--top-k-vocab', default=0, type=int,
help='top k vocab IDs to use with `src_vocab` in channel model scoring')
parser.add_argument('--k2', default=50, type=int,
help='the top k2 candidates to rescore with the noisy channel model for each beam')
parser.add_argument('--ch-wt', default=1, type=float,
help='weight for the channel model')
parser.add_argument('--lm-model', metavar='FILE',
help='path to lm model file, to model P(T). P(T) must share the same vocab as the direct model on the target side')
parser.add_argument('--lm-data', metavar='FILE',
help='path to lm model training data for target language, used to properly load LM with correct dictionary')
parser.add_argument('--lm-wt', default=1, type=float,
help='the weight of the lm in joint decoding')
# fmt: on
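    # Illustrative invocation (untested sketch; data/checkpoint paths are placeholders
    # and --user-dir must point at this example's directory):
    #   fairseq-generate $DATA_BIN --user-dir examples/fast_noisy_channel \
    #     --task noisy_channel_translation --path $DIRECT_MODEL \
    #     --channel-model $CHANNEL_MODEL --lm-model $LM_MODEL --lm-data $LM_DATA_BIN \
    #     --combine-method noisy_channel --k2 10 --ch-wt 1 --lm-wt 1 --beam 5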
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if getattr(args, "score_reference", False):
raise NotImplementedError()
else:
from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator
use_cuda = torch.cuda.is_available() and not self.args.cpu
assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!'
assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs'
if self.args.channel_model is not None:
import copy
ch_args_task = copy.deepcopy(self.args)
tmp = ch_args_task.source_lang
ch_args_task.source_lang = ch_args_task.target_lang
ch_args_task.target_lang = tmp
ch_args_task._name = 'translation'
channel_task = TranslationTask.setup_task(ch_args_task)
arg_dict = {}
arg_dict['task'] = 'language_modeling'
arg_dict['sample_break_mode'] = 'eos'
arg_dict['data'] = self.args.lm_data
arg_dict['output_dictionary_size'] = -1
lm_args = argparse.Namespace(**arg_dict)
lm_task = LanguageModelingTask.setup_task(lm_args)
lm_dict = lm_task.output_dictionary
if self.args.channel_model is not None:
channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task)
for model in channel_models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if self.args.fp16:
model.half()
if use_cuda:
model.cuda()
else:
channel_models = None
lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task)
for model in lm_models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if self.args.fp16:
model.half()
if use_cuda:
model.cuda()
return NoisyChannelSequenceGenerator(
combine_method=self.args.combine_method,
tgt_dict=self.target_dictionary,
src_dict=self.source_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
temperature=getattr(args, 'temperature', 1.),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
normalize_scores=(not getattr(args, 'unnormalized', False)),
channel_models=channel_models,
k2=getattr(self.args, 'k2', 50),
ch_weight=getattr(self.args, 'ch_wt', 1),
channel_scoring_type=self.args.channel_scoring_type,
top_k_vocab=self.args.top_k_vocab,
lm_models=lm_models,
lm_dict=lm_dict,
lm_weight=getattr(self.args, 'lm_wt', 1),
normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False),
)
| EXA-1-master | exa/libraries/fairseq/examples/fast_noisy_channel/noisy_channel_translation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as op
from collections import namedtuple
from multiprocessing import cpu_count
from typing import List, Optional
import sentencepiece as sp
from fairseq.data.encoders.byte_bpe import ByteBPE
from fairseq.data.encoders.byte_utils import byte_encode
from fairseq.data.encoders.bytes import Bytes
from fairseq.data.encoders.characters import Characters
from fairseq.data.encoders.moses_tokenizer import MosesTokenizer
from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE
SPLITS = ["train", "valid", "test"]
def _convert_xml(in_path: str, out_path: str):
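    # keep only `<seg id="...">...</seg>` lines and write the enclosed sentence text,
    # one sentence per line (used for the IWSLT dev/test XML files)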
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
ss = s.strip()
if not ss.startswith("<seg"):
continue
ss = ss.replace("</seg>", "").split('">')
assert len(ss) == 2
f_o.write(ss[1].strip() + "\n")
def _convert_train(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
ss = s.strip()
if ss.startswith("<"):
continue
f_o.write(ss.strip() + "\n")
def _get_bytes(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(Bytes.encode(s.strip()) + "\n")
def _get_chars(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(Characters.encode(s.strip()) + "\n")
def pretokenize(in_path: str, out_path: str, src: str, tgt: str):
Args = namedtuple(
"Args",
[
"moses_source_lang",
"moses_target_lang",
"moses_no_dash_splits",
"moses_no_escape",
],
)
args = Args(
moses_source_lang=src,
moses_target_lang=tgt,
moses_no_dash_splits=False,
moses_no_escape=False,
)
pretokenizer = MosesTokenizer(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(pretokenizer.encode(s.strip()) + "\n")
def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str):
with open(out_path, "w") as f_o:
for lang in [src, tgt]:
with open(f"{in_path_prefix}.{lang}") as f:
for s in f:
f_o.write(byte_encode(s.strip()) + "\n")
def _get_bpe(in_path: str, model_prefix: str, vocab_size: int):
arguments = [
f"--input={in_path}",
f"--model_prefix={model_prefix}",
f"--model_type=bpe",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
"--normalization_rule_name=identity",
f"--num_threads={cpu_count()}",
]
sp.SentencePieceTrainer.Train(" ".join(arguments))
def _apply_bbpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple("Args", ["sentencepiece_model_path"])
args = Args(sentencepiece_model_path=model_path)
tokenizer = ByteBPE(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + "\n")
def _apply_bpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple("Args", ["sentencepiece_model"])
args = Args(sentencepiece_model=model_path)
tokenizer = SentencepieceBPE(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + "\n")
def _concat_files(in_paths: List[str], out_path: str):
with open(out_path, "w") as f_o:
for p in in_paths:
with open(p) as f:
for r in f:
f_o.write(r)
def preprocess_iwslt17(
root: str,
src: str,
tgt: str,
bpe_size: Optional[int],
need_chars: bool,
bbpe_size: Optional[int],
need_bytes: bool,
):
# extract bitext
in_root = op.join(root, f"{src}-{tgt}")
for lang in [src, tgt]:
_convert_train(
op.join(in_root, f"train.tags.{src}-{tgt}.{lang}"),
op.join(root, f"train.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"),
op.join(root, f"valid.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"),
op.join(root, f"test.{lang}"),
)
# pre-tokenize
for lang in [src, tgt]:
for split in SPLITS:
pretokenize(
op.join(root, f"{split}.{lang}"),
op.join(root, f"{split}.moses.{lang}"),
src,
tgt,
)
# tokenize with BPE vocabulary
if bpe_size is not None:
# learn vocabulary
concated_train_path = op.join(root, "train.all")
_concat_files(
[op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")],
concated_train_path,
)
bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}")
_get_bpe(concated_train_path, bpe_model_prefix, bpe_size)
os.remove(concated_train_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bpe(
bpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"),
)
# tokenize with bytes vocabulary
if need_bytes:
for lang in [src, tgt]:
for split in SPLITS:
_get_bytes(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bytes.{lang}"),
)
# tokenize with characters vocabulary
if need_chars:
for lang in [src, tgt]:
for split in SPLITS:
_get_chars(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.chars.{lang}"),
)
# tokenize with byte-level BPE vocabulary
if bbpe_size is not None:
# learn vocabulary
bchar_path = op.join(root, "train.bchar")
_convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path)
bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}")
_get_bpe(bchar_path, bbpe_model_prefix, bbpe_size)
os.remove(bchar_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bbpe(
bbpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"),
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--root", type=str, default="data")
parser.add_argument(
"--bpe-vocab",
default=None,
type=int,
help="Generate tokenized bitext with BPE of size K."
"Default to None (disabled).",
)
parser.add_argument(
"--bbpe-vocab",
default=None,
type=int,
help="Generate tokenized bitext with BBPE of size K."
"Default to None (disabled).",
)
parser.add_argument(
"--byte-vocab",
action="store_true",
help="Generate tokenized bitext with bytes vocabulary",
)
parser.add_argument(
"--char-vocab",
action="store_true",
help="Generate tokenized bitext with chars vocabulary",
)
args = parser.parse_args()
preprocess_iwslt17(
args.root,
"fr",
"en",
args.bpe_vocab,
args.char_vocab,
args.bbpe_vocab,
args.byte_vocab,
)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/examples/byte_level_bpe/get_bitext.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerEncoder, TransformerModel
@register_model("gru_transformer")
class GRUTransformerModel(TransformerModel):
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return GRUTransformerEncoder(args, src_dict, embed_tokens)
class GRUTransformerEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
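        # bidirectional GRU used to contextualize token embeddings before the
        # Transformer layers; each direction uses half the embedding dim so the
        # concatenated output matches the input embedding size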
self.emb_ctx = nn.GRU(
input_size=embed_tokens.embedding_dim,
hidden_size=embed_tokens.embedding_dim // 2,
num_layers=1,
bidirectional=True,
)
def forward_embedding(self, src_tokens):
# embed tokens and positions
x = embed = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
# contextualize embeddings
x = x.transpose(0, 1)
x = self.dropout_module(x)
x, _ = self.emb_ctx.forward(x)
x = x.transpose(0, 1)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
@register_model_architecture("gru_transformer", "gru_transformer")
def gru_transformer_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.layer_wise_attention = getattr(args, "layer_wise_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
@register_model_architecture("gru_transformer", "gru_transformer_big")
def gru_transformer_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
gru_transformer_base_architecture(args)
| EXA-1-master | exa/libraries/fairseq/examples/byte_level_bpe/gru_transformer.py |
#!/usr/bin/env python
"""Helper script to compare two argparse.Namespace objects."""
from argparse import Namespace # noqa
def main():
ns1 = eval(input("Namespace 1: "))
ns2 = eval(input("Namespace 2: "))
def keys(ns):
ks = set()
for k in dir(ns):
if not k.startswith("_"):
ks.add(k)
return ks
k1 = keys(ns1)
k2 = keys(ns2)
def print_keys(ks, ns1, ns2=None):
for k in ks:
if ns2 is None:
print("{}\t{}".format(k, getattr(ns1, k, None)))
else:
print(
"{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None))
)
print("Keys unique to namespace 1:")
print_keys(k1 - k2, ns1)
print()
print("Keys unique to namespace 2:")
print_keys(k2 - k1, ns2)
print()
print("Overlapping keys with different values:")
ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")]
print_keys(ks, ns1, ns2)
print()
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/scripts/compare_namespaces.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into a train and valid set while respecting document
boundaries. Documents should be separated by a single empty line.
"""
import argparse
import random
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("sample_output", help="train output file")
parser.add_argument("remainder_output", help="valid output file")
parser.add_argument("-k", type=int, help="remainder size")
parser.add_argument(
"--lines", action="store_true", help="split lines instead of docs"
)
args = parser.parse_args()
assert args.k is not None
sample = []
remainder = []
num_docs = [0]
def update_sample(doc):
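        # Reservoir sampling: keep a uniform random sample of k documents in `sample`;
        # documents that are displaced from (or never enter) the reservoir are written
        # to `remainder`.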
if len(sample) < args.k:
sample.append(doc.copy())
else:
i = num_docs[0]
j = random.randrange(i + 1)
if j < args.k:
remainder.append(sample[j])
sample[j] = doc.copy()
else:
remainder.append(doc.copy())
num_docs[0] += 1
doc.clear()
with open(args.input, "r", encoding="utf-8") as h:
doc = []
for i, line in enumerate(h):
if line.strip() == "": # empty line indicates new document
update_sample(doc)
else:
doc.append(line)
if args.lines:
update_sample(doc)
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
if len(doc) > 0:
update_sample(doc)
print(file=sys.stderr, flush=True)
assert len(sample) == args.k
with open(args.sample_output, "w", encoding="utf-8") as out:
first = True
for doc in sample:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
with open(args.remainder_output, "w", encoding="utf-8") as out:
first = True
for doc in remainder:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/scripts/split_train_valid_docs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
    parser = argparse.ArgumentParser(description="symmetric alignment builder")
# fmt: off
parser.add_argument('--fast_align_dir',
help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir',
help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic',
help='heuristic to use for symmetrization',
default='grow-diag-final-and')
parser.add_argument('--source_file',
help='path to a file with sentences '
'in the source language')
parser.add_argument('--target_file',
help='path to a file with sentences '
'in the target language')
parser.add_argument('--output_dir',
help='output directory')
# fmt: on
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, "fast_align")
symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal")
sym_fast_align_bin = os.path.join(
args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl"
)
# create joined file
joined_file = os.path.join(args.output_dir, "text.joined")
with open(args.source_file, "r", encoding="utf-8") as src, open(
args.target_file, "r", encoding="utf-8"
) as tgt:
with open(joined_file, "w", encoding="utf-8") as joined:
for s, t in zip_longest(src, tgt):
print("{} ||| {}".format(s.strip(), t.strip()), file=joined)
# run forward alignment
fwd_align_file = os.path.join(args.output_dir, "align.forward")
fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file
)
assert os.system(fwd_fast_align_cmd) == 0
# run backward alignment
bwd_align_file = os.path.join(args.output_dir, "align.backward")
bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file
)
assert os.system(bwd_fast_align_cmd) == 0
# run symmetrization
sym_out_file = os.path.join(args.output_dir, "aligned")
sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format(
SYMFASTALIGN=sym_fast_align_bin,
FWD=fwd_align_file,
BWD=bwd_align_file,
SRC=args.source_file,
TGT=args.target_file,
OUT=sym_out_file,
HEURISTIC=args.sym_heuristic,
SYMAL=symal_bin,
)
assert os.system(sym_cmd) == 0
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/scripts/build_sym_alignment.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for decoding"
)
parser.add_argument("--input", required=True, help="input file to decode")
parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.input_format == "piece":
def decode(input):
return "".join(sp.DecodePieces(input))
elif args.input_format == "id":
def decode(input):
return "".join(sp.DecodeIds(input))
else:
raise NotImplementedError
def tok2int(tok):
# remap reference-side <unk> (represented as <<unk>>) to 0
return int(tok) if tok != "<<unk>>" else 0
with open(args.input, "r", encoding="utf-8") as h:
for line in h:
if args.input_format == "id":
print(decode(list(map(tok2int, line.rstrip().split()))))
elif args.input_format == "piece":
print(decode(line.rstrip().split()))
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/scripts/spm_decode.py |
| EXA-1-master | exa/libraries/fairseq/scripts/__init__.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import shutil
import sys
pt_regexp = re.compile(r"checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt")
pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt")
pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt")
def parse_checkpoints(files):
entries = []
for f in files:
m = pt_regexp_epoch_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
else:
m = pt_regexp_update_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
return entries
def last_n_checkpoints(files, n):
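    # return the n most recent checkpoint filenames, newest first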
entries = parse_checkpoints(files)
return [x[1] for x in sorted(entries, reverse=True)[:n]]
def every_n_checkpoints(files, n):
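    # keep the newest checkpoint and every n-th one counting backwards from it,
    # returned in ascending (oldest-first) order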
entries = parse_checkpoints(files)
return [x[1] for x in sorted(sorted(entries)[::-n])]
def main():
parser = argparse.ArgumentParser(
description=(
"Recursively delete checkpoint files from `root_dir`, "
"but preserve checkpoint_best.pt and checkpoint_last.pt"
)
)
parser.add_argument("root_dirs", nargs="*")
parser.add_argument(
"--save-last", type=int, default=0, help="number of last checkpoints to save"
)
parser.add_argument(
"--save-every", type=int, default=0, help="interval of checkpoints to save"
)
parser.add_argument(
"--preserve-test",
action="store_true",
help="preserve checkpoints in dirs that start with test_ prefix (default: delete them)",
)
parser.add_argument(
"--delete-best", action="store_true", help="delete checkpoint_best.pt"
)
parser.add_argument(
"--delete-last", action="store_true", help="delete checkpoint_last.pt"
)
parser.add_argument(
"--no-dereference", action="store_true", help="don't dereference symlinks"
)
args = parser.parse_args()
files_to_desymlink = []
files_to_preserve = []
files_to_delete = []
for root_dir in args.root_dirs:
for root, _subdirs, files in os.walk(root_dir):
if args.save_last > 0:
to_save = last_n_checkpoints(files, args.save_last)
else:
to_save = []
if args.save_every > 0:
to_save += every_n_checkpoints(files, args.save_every)
for file in files:
if not pt_regexp.fullmatch(file):
continue
full_path = os.path.join(root, file)
if (
not os.path.basename(root).startswith("test_") or args.preserve_test
) and (
(file == "checkpoint_last.pt" and not args.delete_last)
or (file == "checkpoint_best.pt" and not args.delete_best)
or file in to_save
):
if os.path.islink(full_path) and not args.no_dereference:
files_to_desymlink.append(full_path)
else:
files_to_preserve.append(full_path)
else:
files_to_delete.append(full_path)
if len(files_to_desymlink) == 0 and len(files_to_delete) == 0:
print("Nothing to do.")
sys.exit(0)
files_to_desymlink = sorted(files_to_desymlink)
files_to_preserve = sorted(files_to_preserve)
files_to_delete = sorted(files_to_delete)
print("Operations to perform (in order):")
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
print(" - preserve (and dereference symlink): " + file)
if len(files_to_preserve) > 0:
for file in files_to_preserve:
print(" - preserve: " + file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print(" - delete: " + file)
while True:
resp = input("Continue? (Y/N): ")
if resp.strip().lower() == "y":
break
elif resp.strip().lower() == "n":
sys.exit(0)
print("Executing...")
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
realpath = os.path.realpath(file)
print("rm " + file)
os.remove(file)
print("cp {} {}".format(realpath, file))
shutil.copyfile(realpath, file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print("rm " + file)
os.remove(file)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/scripts/rm_pt.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("--gzip", action="store_true")
args = parser.parse_args()
def gopen():
if args.gzip:
return gzip.open(args.input, "r")
else:
return open(args.input, "r", encoding="utf-8")
num_lines = []
num_toks = []
with gopen() as h:
num_docs = 1
num_lines_in_doc = 0
num_toks_in_doc = 0
for i, line in enumerate(h):
if len(line.strip()) == 0: # empty line indicates new document
num_docs += 1
num_lines.append(num_lines_in_doc)
num_toks.append(num_toks_in_doc)
num_lines_in_doc = 0
num_toks_in_doc = 0
else:
num_lines_in_doc += 1
num_toks_in_doc += len(line.rstrip().split())
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
print("found {} docs".format(num_docs))
print("average num lines per doc: {}".format(np.mean(num_lines)))
print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/scripts/count_docs.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import contextlib
import sys
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for encoding"
)
parser.add_argument(
"--inputs", nargs="+", default=["-"], help="input files to filter/encode"
)
parser.add_argument(
"--outputs", nargs="+", default=["-"], help="path to save encoded outputs"
)
parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
parser.add_argument(
"--min-len",
type=int,
metavar="N",
help="filter sentence pairs with fewer than N tokens",
)
parser.add_argument(
"--max-len",
type=int,
metavar="N",
help="filter sentence pairs with more than N tokens",
)
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.output_format == "piece":
def encode(input):
return sp.EncodeAsPieces(input)
elif args.output_format == "id":
def encode(input):
return list(map(str, sp.EncodeAsIds(input)))
else:
raise NotImplementedError
if args.min_len is not None or args.max_len is not None:
def valid(line):
return (args.min_len is None or len(line) >= args.min_len) and (
args.max_len is None or len(line) <= args.max_len
)
else:
def valid(lines):
return True
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
stats = {
"num_empty": 0,
"num_filtered": 0,
}
def encode_line(line):
line = line.strip()
if len(line) > 0:
line = encode(line)
if valid(line):
return line
else:
stats["num_filtered"] += 1
else:
stats["num_empty"] += 1
return None
for i, lines in enumerate(zip(*inputs), start=1):
enc_lines = list(map(encode_line, lines))
if not any(enc_line is None for enc_line in enc_lines):
for enc_line, output_h in zip(enc_lines, outputs):
print(" ".join(enc_line), file=output_h)
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)
if __name__ == "__main__":
main()
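# Example invocation (illustrative; model and file names are placeholders):
#   python spm_encode.py --model spm.model --output_format piece \
#       --inputs train.de train.en --outputs train.spm.de train.spm.en \
#       --min-len 1 --max-len 256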
| EXA-1-master | exa/libraries/fairseq/scripts/spm_encode.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into shards while respecting document boundaries. Documents
should be separated by a single empty line.
"""
import argparse
import contextlib
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("--num-shards", type=int)
args = parser.parse_args()
assert args.num_shards is not None and args.num_shards > 1
with open(args.input, "r", encoding="utf-8") as h:
with contextlib.ExitStack() as stack:
outputs = [
stack.enter_context(
open(args.input + ".shard" + str(i), "w", encoding="utf-8")
)
for i in range(args.num_shards)
]
doc = []
first_doc = [True] * args.num_shards
def output_doc(i):
if not first_doc[i]:
outputs[i].write("\n")
first_doc[i] = False
for line in doc:
outputs[i].write(line)
doc.clear()
num_docs = 0
for line in h:
if line.strip() == "": # empty line indicates new document
output_doc(num_docs % args.num_shards)
num_docs += 1
else:
doc.append(line)
output_doc(num_docs % args.num_shards)
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/fairseq/scripts/shard_docs.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import sentencepiece as spm
if __name__ == "__main__":
spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
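# All arguments are forwarded verbatim to SentencePieceTrainer. Illustrative
# invocation (flag values are placeholders):
#   python spm_train.py --input=corpus.txt --model_prefix=spm --vocab_size=8000 --model_type=unigram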
| EXA-1-master | exa/libraries/fairseq/scripts/spm_train.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import os
import re
import torch
from fairseq.file_io import PathManager
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for fpath in inputs:
with PathManager.open(fpath, "rb") as f:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, "cpu")
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state["model"]
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
"For checkpoint {}, expected list of params: {}, "
"but found: {}".format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
if averaged_params[k].is_floating_point():
averaged_params[k].div_(num_models)
else:
averaged_params[k] //= num_models
new_state["model"] = averaged_params
return new_state
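# Illustrative programmatic usage (checkpoint paths are placeholders):
#   state = average_checkpoints(["checkpoint1.pt", "checkpoint2.pt"])
#   torch.save(state, "checkpoint_avg.pt")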
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt")
else:
pt_regexp = re.compile(r"checkpoint(\d+)\.pt")
files = PathManager.ls(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
        raise Exception(
            "Found {} checkpoint files but need at least {}".format(len(entries), n)
        )
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description="Tool to average the params of input checkpoints to "
"produce a new checkpoint",
)
# fmt: off
parser.add_argument('--inputs', required=True, nargs='+',
help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this path.')
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument('--num-epoch-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_xx.pt in the '
'path specified by input, and average last this many of them.')
num_group.add_argument('--num-update-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by'
' input, and average last this many of them.')
num_group.add_argument('--num-best-checkpoints', type=int, default=0,
help='if set, will try to find checkpoints with names checkpoint_best_ee_xx.pt in the path specified by'
' input, and average last this many of them.')
parser.add_argument('--checkpoint-upper-bound', type=int,
help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, '
                             'when using --num-update-checkpoints, this will set an upper bound on which update to use, '
'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be'
                             ' averaged. '
'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would'
' be averaged assuming --save-interval-updates 500'
)
# fmt: on
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
assert args.checkpoint_upper_bound is None or (
args.num_epoch_checkpoints is not None
or args.num_update_checkpoints is not None
), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints"
assert (
args.num_epoch_checkpoints is None or args.num_update_checkpoints is None
), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints"
if num is not None:
args.inputs = last_n_checkpoints(
args.inputs,
num,
is_update_based,
upper_bound=args.checkpoint_upper_bound,
)
print("averaging checkpoints: ", args.inputs)
if args.num_best_checkpoints > 0:
args.inputs = list(
sorted(
args.inputs,
key=lambda x: float(
os.path.basename(x).split("_")[-1].replace(".pt", "")
),
)
)
args.inputs = args.inputs[: args.num_best_checkpoints]
for path in args.inputs:
print(os.path.basename(path))
new_state = average_checkpoints(args.inputs)
with PathManager.open(args.output, "wb") as f:
torch.save(new_state, f)
print("Finished writing averaged checkpoint to {}".format(args.output))
if __name__ == "__main__":
main()
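# Example invocation (illustrative; paths are placeholders):
#   python average_checkpoints.py --inputs /path/to/checkpoints \
#       --num-epoch-checkpoints 5 --output /path/to/checkpoint_avg.pt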
| EXA-1-master | exa/libraries/fairseq/scripts/average_checkpoints.py |
from pathlib import Path
import os
cwd = Path(".").resolve()
print("running 'check_installation.py' from:", cwd)
# Old versions of numpy/torch can prevent loading the .so files
import torch
print("torch:", torch.__version__)
import numpy
print("numpy:", numpy.__version__)
import fairseq
print("Fairseq installed at:", fairseq.__file__)
import fairseq.criterions
import fairseq.dataclass.configs
import _imp
print("Should load following .so suffixes:", _imp.extension_suffixes())
so_files = list(Path(fairseq.__file__).parent.glob("*.so"))
so_files.extend(Path(fairseq.__file__).parent.glob("data/*.so"))
print("Found following .so files:")
for so_file in so_files:
print(f"- {so_file}")
from fairseq import libbleu
print("Found libbleu at", libbleu.__file__)
from fairseq.data import data_utils_fast
print("Found data_utils_fast at", data_utils_fast.__file__)
| EXA-1-master | exa/libraries/fairseq/scripts/check_installation.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from fairseq.data import Dictionary, data_utils, indexed_dataset
def get_parser():
parser = argparse.ArgumentParser(
description="writes text from binarized file to stdout"
)
# fmt: off
parser.add_argument('--dataset-impl', help='dataset implementation',
choices=indexed_dataset.get_available_dataset_impl())
parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None)
parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
dictionary = Dictionary.load(args.dict) if args.dict is not None else None
dataset = data_utils.load_indexed_dataset(
args.input,
dictionary,
dataset_impl=args.dataset_impl,
default="lazy",
)
for tensor_line in dataset:
if dictionary is None:
line = " ".join([str(int(x)) for x in tensor_line])
else:
line = dictionary.string(tensor_line)
print(line)
if __name__ == "__main__":
main()
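# Example invocation (illustrative; the --input prefix should point at an
# existing .bin/.idx pair produced by fairseq-preprocess):
#   python read_binarized.py --dataset-impl mmap --dict dict.txt --input data-bin/train.en-de.en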
| EXA-1-master | exa/libraries/fairseq/scripts/read_binarized.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
"""Reads in a fairseq output file, and verifies that the constraints
(C- lines) are present in the output (the first H- line). Assumes that
constraints are listed prior to the first hypothesis.
"""
constraints = []
found = 0
total = 0
for line in sys.stdin:
if line.startswith("C-"):
constraints.append(line.rstrip().split("\t")[1])
elif line.startswith("H-"):
text = line.split("\t")[2]
for constraint in constraints:
total += 1
if constraint in text:
found += 1
else:
print(f"No {constraint} in {text}", file=sys.stderr)
constraints = []
print(f"Found {found} / {total} = {100 * found / total:.1f}%")
| EXA-1-master | exa/libraries/fairseq/scripts/constraints/validate.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
def get_phrase(words, index, length):
assert index < len(words) - length + 1
phr = " ".join(words[index : index + length])
for i in range(index, index + length):
words.pop(index)
return phr
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if "\t" in line:
source, target = line.split("\t")
if args.add_sos:
target = f"<s> {target}"
if args.add_eos:
target = f"{target} </s>"
if len(target.split()) >= args.len:
words = [target]
num = args.number
choices = {}
for i in range(num):
if len(words) == 0:
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = " ".join(
tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
)
for j in range(
phrase_index, min(len(tokens), phrase_index + args.len)
):
tokens.pop(phrase_index)
if phrase_index > 0:
words.append(" ".join(tokens[0:phrase_index]))
if phrase_index + 1 < len(tokens):
words.append(" ".join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
# mask out with spaces
target = target.replace(choice, " " * len(choice), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
print(source, *constraints, sep="\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
main(args)
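# Example invocation (illustrative): read "source\ttarget" pairs from stdin and
# sample up to two 3-token constraint phrases from each target:
#   python extract.py --number 2 --len 3 < train.tsv > train.constraints.tsv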
| EXA-1-master | exa/libraries/fairseq/scripts/constraints/extract.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Follows PEP-0440 version scheme guidelines
# https://www.python.org/dev/peps/pep-0440/#version-scheme
#
# Examples:
# 0.1.0.devN # Developmental release
# 0.1.0aN # Alpha release
# 0.1.0bN # Beta release
# 0.1.0rcN # Release Candidate
# 0.1.0 # Final release
__version__ = "0.1.0b0"
| EXA-1-master | exa/libraries/multimodal-main/version.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import sys
from datetime import date
from setuptools import find_packages, setup
def clean_html(raw_html):
cleanr = re.compile("<.*?>")
cleantext = re.sub(cleanr, "", raw_html).strip()
return cleantext
def get_version():
# get version string from version.py
version_file = os.path.join(os.path.dirname(__file__), "version.py")
version_regex = r"__version__ = ['\"]([^'\"]*)['\"]"
with open(version_file, "r") as f:
version = re.search(version_regex, f.read(), re.M).group(1)
return version
def fetch_long_description():
with open("README.md", encoding="utf8") as f:
readme = f.read()
# https://stackoverflow.com/a/12982689
readme = clean_html(readme)
return readme
def read_requirements(file):
with open(file) as f:
reqs = f.read()
return reqs.strip().split("\n")
def get_nightly_version():
today = date.today()
return f"{today.year}.{today.month}.{today.day}"
def parse_args(argv): # Pass in a list of string from CLI
parser = argparse.ArgumentParser(description="torchmultimodal setup")
parser.add_argument(
"--package_name",
type=str,
default="torchmultimodal",
help="The name of this output wheel",
)
return parser.parse_known_args(argv)
if __name__ == "__main__":
args, unknown = parse_args(sys.argv[1:])
# Set up package name and version
name = args.package_name
is_nightly = "nightly" in name
version = get_nightly_version() if is_nightly else get_version()
print(f"-- {name} building version: {version}")
sys.argv = [sys.argv[0]] + unknown
setup(
name=name,
include_package_data=True,
packages=find_packages(
exclude=("examples*", "tests*")
), # Excluded folders don't get packaged
python_requires=">=3.7",
install_requires=read_requirements("requirements.txt"),
version=version,
description="PyTorch Multimodal Library",
long_description=fetch_long_description(),
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/multimodal",
author="PyTorch Multimodal Team",
author_email="[email protected]",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
extras_require={"dev": read_requirements("dev-requirements.txt")},
)
| EXA-1-master | exa/libraries/multimodal-main/setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import pytest
import torch
import torch.distributed as dist
from torch import nn, Tensor
def gpu_test(gpu_count: int = 1):
"""
Annotation for GPU tests, skipping the test if the
    required number of GPUs is not available
"""
message = f"Not enough GPUs to run the test: required {gpu_count}"
return pytest.mark.skipif(torch.cuda.device_count() < gpu_count, reason=message)
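# Illustrative usage as a decorator on a test function:
#   @gpu_test(gpu_count=2)
#   def test_runs_on_two_gpus():
#       ...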
def init_distributed_on_file(world_size: int, gpu_id: int, sync_file: str):
"""
    Init the process group needed for distributed training by syncing
the different workers on a file.
"""
torch.cuda.set_device(gpu_id)
dist.init_process_group(
backend="nccl",
init_method="file://" + sync_file,
world_size=world_size,
rank=gpu_id,
)
@contextmanager
def with_temp_files(count: int):
"""
    Context manager to create temporary files and close them
    at the end of the context
"""
if count == 1:
fd, file_name = tempfile.mkstemp()
yield file_name
os.close(fd)
else:
temp_files = [tempfile.mkstemp() for _ in range(count)]
yield [t[1] for t in temp_files]
for t in temp_files:
os.close(t[0])
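# Illustrative usage (a single file name is yielded for count=1, a list otherwise):
#   with with_temp_files(count=2) as (sync_file_a, sync_file_b):
#       ...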
def set_rng_seed(seed):
"""Sets the seed for pytorch and numpy random number generators"""
torch.manual_seed(seed)
random.seed(seed)
_ASSET_DIR = (Path(__file__).parent / "assets").resolve()
def get_asset_path(file_name: str) -> str:
"""Get the path to the file under assets directory."""
return str(_ASSET_DIR.joinpath(file_name))
def assert_expected(
actual: Any,
expected: Any,
rtol: Optional[float] = None,
atol: Optional[float] = None,
check_device=True,
):
torch.testing.assert_close(
actual,
expected,
rtol=rtol,
atol=atol,
check_device=check_device,
msg=f"actual: {actual}, expected: {expected}",
)
def tuple_to_dict(t: Tuple) -> Dict:
if not isinstance(t, tuple):
raise TypeError(f"Input must be of type tuple but got {type(t)}")
return {k: v for k, v in enumerate(t)}
def is_namedtuple(nt: Any) -> bool:
# namedtuple is a subclass of tuple with additional attributes
# we verify specifically here the attribute `_fields` which should be a tuple of field names
# from the namedtuple instance
if not isinstance(nt, tuple):
return False
f = getattr(nt, "_fields", None)
if not isinstance(f, tuple):
return False
return all(type(name) == str for name in f)
def namedtuple_to_dict(nt: NamedTuple) -> Dict:
# Do this for safety. _asdict is a public method as of python 3.8:
# https://docs.python.org/3/library/collections.html#collections.somenamedtuple._asdict
if not hasattr(nt, "_asdict"):
raise AttributeError(f"{nt} must have the attribute `_asdict`.")
return nt._asdict()
def assert_expected_namedtuple(
actual: Union[Dict, NamedTuple],
expected: Union[Dict, NamedTuple],
rtol: Optional[float] = None,
atol: Optional[float] = None,
):
"""Helper function that calls assert_expected recursively on nested Dict/NamedTuple
Example::
>>>from collections import namedtuple
>>>Out = namedtuple("Out", "x y")
>>>InnerOut = namedtuple("InnerOut", "z")
>>>actual = Out(x=InnerOut(z=tensor([1])), y=tensor([2]))
>>>expected = Out(x=InnerOut(z=tensor([1])), y=tensor([2]))
>>>assert_expected_namedtuple(actual, expected)
"""
# convert NamedTuple to dictionary
if is_namedtuple(actual):
actual = namedtuple_to_dict(actual)
if is_namedtuple(expected):
expected = namedtuple_to_dict(expected)
if not isinstance(actual, Dict):
raise TypeError(
f"'actual' needs to be either of type 'NamedTuple' or 'Dict' but got {type(actual)}"
)
if not isinstance(expected, Dict):
raise TypeError(
f"'expected' needs to be either of type 'NamedTuple' or 'Dict' but got {type(expected)}"
)
for attr, _expected in expected.items():
_actual = actual[attr]
if _expected is None:
# optional output
assert _actual is None
elif isinstance(_actual, Dict):
# dictionary output, e.g., cache of k/v
assert_expected_namedtuple(_actual, _expected, rtol=rtol, atol=atol)
elif isinstance(_actual, tuple) and (not is_namedtuple(_actual)):
# outputs are from multiple layers: (Tensor, Tensor, ...)
assert_expected_namedtuple(
tuple_to_dict(_actual), tuple_to_dict(_expected), rtol=rtol, atol=atol
)
elif is_namedtuple(_actual):
# output is another named tuple instance
assert_expected_namedtuple(_actual, _expected, rtol=rtol, atol=atol)
elif isinstance(_actual, Tensor):
# single tensor output
if isinstance(_expected, tuple) and len(_expected) == 2:
# test shape and sum
_expected_shape, _expected_sum = _expected
assert_expected(_actual.shape, _expected_shape)
assert_expected(
_actual.sum().item(), _expected_sum, rtol=rtol, atol=atol
)
elif isinstance(_expected, Tensor):
# test value
assert_expected(_actual, _expected, rtol=rtol, atol=atol)
else:
raise TypeError(
f"Unsupported type for expected when actual is a tensor: {type(_expected)}"
)
else:
raise TypeError(
f"Unsupported types for test assertion: actual {type(_actual)}, expected {type(_expected)}"
)
def init_weights_with_constant(model: nn.Module, constant: float = 1.0) -> None:
for p in model.parameters():
nn.init.constant_(p, constant)
| EXA-1-master | exa/libraries/multimodal-main/tests/test_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected
from torch import nn
from torch.utils.checkpoint import checkpoint
from torchmultimodal.utils.common import (
checkpoint_wrapper,
shift_dim,
tensor_slice,
to_tuple_tuple,
)
def test_shift_dim():
test_random_tensor = torch.randn(2, 2, 2, 2, 2)
actual = shift_dim(test_random_tensor, 1, -1)
expected = test_random_tensor.permute(0, 2, 3, 4, 1).contiguous()
assert_expected(actual, expected)
actual = shift_dim(test_random_tensor, -3, 3)
expected = test_random_tensor.permute(0, 1, 3, 2, 4).contiguous()
assert_expected(actual, expected)
class TestTensorSlice:
@pytest.fixture(scope="class")
def test_input(self):
return torch.tensor([[[0, 1], [2, 3], [5, 6]]])
def test_default(self, test_input):
actual = tensor_slice(test_input, [0, 1, 0], [1, 1, 2])
expected = torch.tensor([[[2, 3]]])
assert_expected(actual, expected)
def test_size_minus_one(self, test_input):
"""Test size -1"""
actual = tensor_slice(test_input, [0, 1, 0], [1, -1, 2])
expected = torch.tensor([[[2, 3], [5, 6]]])
assert_expected(actual, expected)
def test_uneven_begin_size(self, test_input):
"""Test uneven begin and size vectors"""
actual = tensor_slice(test_input, [0, 1, 0], [1, 1])
expected = torch.tensor([[[2, 3]]])
assert_expected(actual, expected)
actual = tensor_slice(test_input, [0, 1], [1, 1, 2])
expected = torch.tensor([[[2, 3]]])
assert_expected(actual, expected)
@pytest.mark.xfail(raises=ValueError, reason="Invalid begin")
def test_invalid_begin(self, test_input):
tensor_slice(test_input, [-1, 1, 0], [1, 1, 2])
@pytest.mark.xfail(raises=ValueError, reason="Invalid size")
def test_invalid_size(self, test_input):
tensor_slice(test_input, [0, 1, 0], [-2, 1, 2])
class TestToTupleTuple:
@pytest.fixture(scope="class")
def expected(self):
return ((2, 2, 2), (2, 2, 2), (2, 2, 2))
def test_int(self, expected):
actual = to_tuple_tuple(2, 3, 3)
assert actual == expected, "int -> tuple[tuple] incorrect"
def test_tuple(self, expected):
actual = to_tuple_tuple((2, 2, 2), 3, 3)
assert actual == expected, "tuple -> tuple[tuple] incorrect"
class TestCheckpointWrapper:
@pytest.fixture
def model(self):
class DummyAttention(nn.Module):
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.ones(3))
def forward(self, x, y, attn_mask=None, use_cache=None):
grad = x if use_cache else y
grad = grad * self.param
if attn_mask is not None:
grad = grad * attn_mask
return grad
class DummyIdentity(nn.Module):
"""Returns a passthrough of the input tensor with requires_grad True"""
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.ones(1))
def forward(self, x):
return self.param * x
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
self.attention = DummyAttention()
self.identity = DummyIdentity()
@checkpoint_wrapper
def _layer_to_wrap(self, x, y, attn_mask, use_cache):
return self.attention(x, y, attn_mask, use_cache)
def forward(self, x, y, attn_mask=None, use_cache=False):
x = self.identity(x)
y = self.identity(y)
out = self._layer_to_wrap(
x, y, attn_mask=attn_mask, use_cache=use_cache
)
return out
return DummyModel()
@pytest.fixture
def inputs(self):
x = torch.ones(3)
y = torch.ones(3) * 2
attn_mask = torch.tensor([1, 1, 0])
x = x
y = y
attn_mask = attn_mask
return x, y, attn_mask
@pytest.fixture
def compute_grad(self, model):
model.zero_grad()
def _compute_grad(output):
output.sum().backward()
grad_checkpointed = {}
for name, param in model.named_parameters():
grad_checkpointed[name] = param.grad.data.clone()
return grad_checkpointed
return _compute_grad
def test_training_mode(self, model, inputs, compute_grad, mocker):
"""Test training mode that checkpoint is on"""
mock_checkpoint = mocker.patch(
"torchmultimodal.utils.common.checkpoint", wraps=checkpoint
)
with pytest.warns(UserWarning):
model.train()
x, y, attn_mask = inputs
actual = model(x, y, attn_mask=attn_mask, use_cache=True)
# gradient of attention.param is y * attn_mask when use_cache is False (checkpointing on)
expected = torch.tensor([2.0, 2.0, 0.0])
assert_expected(actual, expected)
actual_grad = compute_grad(actual)
assert_expected(
actual_grad["attention.param"], torch.tensor([2.0, 2.0, 0.0])
)
mock_checkpoint.assert_called_once()
def test_eval_model(self, model, inputs, compute_grad, mocker):
"""Test eval mode that checkpoint is off"""
mock_checkpoint = mocker.patch(
"torchmultimodal.utils.common.checkpoint", wraps=checkpoint
)
model.eval()
x, y, attn_mask = inputs
actual = model(x, y, attn_mask=attn_mask, use_cache=True)
# gradient of attention.param is x * attn_mask when use_cache is True (checkpointing off)
expected = torch.tensor([1.0, 1.0, 0.0])
assert_expected(actual, expected)
actual_grad = compute_grad(actual)
assert_expected(actual_grad["attention.param"], torch.tensor([1.0, 1.0, 0.0]))
mock_checkpoint.assert_not_called()
| EXA-1-master | exa/libraries/multimodal-main/tests/utils/test_common.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from tests.test_utils import assert_expected
from torchmultimodal.utils.attention import get_causal_attention_mask
def test_get_causal_attention_masks():
actual = get_causal_attention_mask(3, 2)
expected = torch.tensor(
[
[1.0, 0.0],
[1.0, 1.0],
[1.0, 1.0],
]
)
assert_expected(actual, expected)
actual = get_causal_attention_mask(3, 3)
expected = torch.tensor(
[
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 1.0],
]
)
assert_expected(actual, expected)
| EXA-1-master | exa/libraries/multimodal-main/tests/utils/test_attention_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.video_gpt import video_gpt
from torchmultimodal.utils.generate import (
GenerationUtil,
get_logits_mask,
LogitsFilterTopK,
LogitsFilterTopP,
SampleOutput,
)
@pytest.fixture(autouse=True)
def set_seed():
set_rng_seed(4)
class TestLogitsMask:
def test_normal(self):
actual = get_logits_mask(
in_seq_len=3, out_seq_len=4, num_in_tokens=4, num_out_tokens=6
)
expected = torch.tensor(
[
[1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
]
)
assert_expected(actual, expected)
def test_zero_dims(self):
actual = get_logits_mask(
in_seq_len=0, out_seq_len=0, num_in_tokens=0, num_out_tokens=0
)
assert actual.nelement() == 0
def test_in_seq_only(self):
actual = get_logits_mask(
in_seq_len=1, out_seq_len=0, num_in_tokens=4, num_out_tokens=6
)
expected = torch.tensor([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
assert_expected(actual, expected)
def test_out_seq_only(self):
actual = get_logits_mask(
in_seq_len=0, out_seq_len=1, num_in_tokens=4, num_out_tokens=6
)
expected = torch.tensor([[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]])
assert_expected(actual, expected)
class TestGenerationUtil:
_model_params = {
"input_shape": (4, 8, 8),
"latent_shape": (2, 4, 4),
"d_model": 576,
"n_head": 4,
"num_decoder_layers": 16,
"dropout": 0.2,
"attn_dropout": 0.3,
}
@pytest.fixture
def model_fn(self):
return video_gpt
@pytest.fixture
def generation_model(self, model_fn):
model = model_fn(**self._model_params)
model.eval()
return GenerationUtil(model=model)
def test_model_eval_warning(self, model_fn):
model = model_fn(**self._model_params)
with pytest.warns(UserWarning):
generator = GenerationUtil(model=model)
def test_sample(self, generation_model):
input_shape = self._model_params["input_shape"]
latent_shape = self._model_params["latent_shape"]
latent_seq_len = torch.prod(torch.tensor(latent_shape)).item()
x = torch.randn(1, 3, *input_shape) # (b, c, *input_shape)
out = generation_model.sample(
x, max_seq_len=latent_seq_len, use_cache=True, causal=True
)
assert isinstance(out, SampleOutput)
actual = out.tokens.shape
expected = torch.Size([1, 32])
assert_expected(actual, expected)
def test_filter_logits(self, generation_model):
kwargs = {"top_k": 5, "top_p": 0.7}
logits = torch.arange(10, dtype=torch.float).unsqueeze(0)
actual = generation_model._filter_logits(logits, **kwargs)
        # apply top-k (keep 5 tokens), then top-p: keep tokens within the top 70% of cumulative probability
expected = torch.tensor(
[
[
-float("inf"),
-float("inf"),
-float("inf"),
-float("inf"),
-float("inf"),
-float("inf"),
-float("inf"),
-float("inf"),
8.0,
9.0,
]
]
)
assert_expected(actual, expected)
class TestLogitsFilterTopK:
_func_params = {
"top_k": 0,
"filter_value": 0.0,
"min_tokens_to_keep": 1,
}
@pytest.fixture
def filter_fn(self):
return LogitsFilterTopK
def test_min_tokens_to_keep(self, filter_fn):
kwargs = {**self._func_params, **{"top_k": 1, "min_tokens_to_keep": 2}}
logits_filter = filter_fn(**kwargs)
logits = torch.arange(10, dtype=torch.float).unsqueeze(0)
actual = logits_filter(logits)
expected = torch.tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 9.0]])
assert_expected(actual, expected)
def test_top_k_invalid(self, filter_fn):
kwargs = {**self._func_params, **{"top_k": -1}}
with pytest.raises(ValueError):
logits_filter = filter_fn(**kwargs)
def test_default(self, filter_fn):
kwargs = self._func_params
logits_filter = filter_fn(**kwargs)
logits = torch.arange(10, dtype=torch.float).unsqueeze(0)
actual = logits_filter(logits)
expected = logits
assert_expected(actual, expected)
def test_top_k(self, filter_fn):
kwargs = {**self._func_params, **{"top_k": 5}}
logits_filter = filter_fn(**kwargs)
logits = torch.arange(10, dtype=torch.float).unsqueeze(0)
actual = logits_filter(logits)
expected = torch.tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 6.0, 7.0, 8.0, 9.0]])
assert_expected(actual, expected)
class TestLogitsFilterTopP:
_func_params = {
"top_p": 1.0,
"filter_value": 0.0,
"min_tokens_to_keep": 1,
}
@pytest.fixture
def filter_fn(self):
return LogitsFilterTopP
def test_min_tokens_to_keep(self, filter_fn):
kwargs = {**self._func_params, **{"top_p": 0.0, "min_tokens_to_keep": 2}}
logits_filter = filter_fn(**kwargs)
logits = torch.arange(10, dtype=torch.float).unsqueeze(0)
actual = logits_filter(logits)
expected = torch.tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 9.0]])
assert_expected(actual, expected)
def test_top_p_invalid(self, filter_fn):
kwargs = {**self._func_params, **{"top_p": 2.0}}
with pytest.raises(ValueError):
logits_filter = filter_fn(**kwargs)
kwargs = {**self._func_params, **{"top_p": -1.0}}
with pytest.raises(ValueError):
logits_filter = filter_fn(**kwargs)
def test_default(self, filter_fn):
kwargs = self._func_params
logits_filter = filter_fn(**kwargs)
logits = torch.arange(10, dtype=torch.float).unsqueeze(0)
actual = logits_filter(logits)
expected = logits
assert_expected(actual, expected)
def test_top_p(self, filter_fn):
kwargs = {**self._func_params, **{"top_p": 0.9}}
logits_filter = filter_fn(**kwargs)
logits = torch.ones(10, dtype=torch.float).unsqueeze(0)
actual = logits_filter(logits)
# 9 tokens should be kept as the logits are of uniform distribution
expected = torch.tensor([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]])
assert_expected(actual, expected)
| EXA-1-master | exa/libraries/multimodal-main/tests/utils/test_generate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
from torchmultimodal.utils.common import load_module_from_url
def test_load_module_from_url():
model = nn.Linear(2, 3)
load_module_from_url(
model, "https://download.pytorch.org/models/multimodal/test/linear_2_3.pt"
)
| EXA-1-master | exa/libraries/multimodal-main/tests/utils/test_ckpt_load.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from torchmultimodal.utils.assertion import assert_equal_lengths
class TestAssertEqualLengths:
def test_different_lengths(self):
with pytest.raises(ValueError):
assert_equal_lengths([1], (1, 1))
def test_same_lengths(self):
assert_equal_lengths([1, 1], (1, 1))
| EXA-1-master | exa/libraries/multimodal-main/tests/utils/test_assertion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.multiprocessing as mp
from tests.test_utils import (
assert_expected,
gpu_test,
init_distributed_on_file,
set_rng_seed,
with_temp_files,
)
from torch import Tensor
from torchmultimodal.utils.distributed import gather_tensor
BATCH_SIZE = 4
EMBEDDING_DIM = 8
class TestGatherTensor:
"""
Test gather_tensor method with backprop_in_gather param
"""
@pytest.fixture(autouse=True)
def setup(self):
set_rng_seed(1234)
@pytest.fixture
def input_tensor(self):
return torch.randn(BATCH_SIZE, EMBEDDING_DIM)
@staticmethod
def _worker(
gpu_id: int,
world_size: int,
sync_file: str,
input_tensor: Tensor,
backprop_in_gather: bool,
):
init_distributed_on_file(
world_size=world_size, gpu_id=gpu_id, sync_file=sync_file
)
gpu_tensor = input_tensor.clone().requires_grad_().to(gpu_id) + gpu_id
expected_output = [input_tensor + i for i in range(world_size)]
gathered_output = gather_tensor(gpu_tensor, backprop_in_gather)
assert_expected(len(expected_output), len(gathered_output))
for i, tensor in enumerate(gathered_output):
assert_expected(tensor, expected_output[i], check_device=False)
if i == gpu_id or backprop_in_gather:
assert tensor.grad_fn is not None
else:
assert tensor.grad_fn is None
@gpu_test(gpu_count=1)
@pytest.mark.parametrize("backprop_in_gather", [True, False])
def test_single_gpu_gather(self, input_tensor: Tensor, backprop_in_gather: bool):
world_size = 1
with with_temp_files(count=1) as sync_file:
mp.spawn(
TestGatherTensor._worker,
(
world_size,
sync_file,
input_tensor,
backprop_in_gather,
),
nprocs=world_size,
)
@gpu_test(gpu_count=2)
@pytest.mark.parametrize("backprop_in_gather", [True, False])
def test_multi_gpu_gather(self, input_tensor: Tensor, backprop_in_gather: bool):
world_size = 2
with with_temp_files(count=1) as sync_file:
mp.spawn(
TestGatherTensor._worker,
(
world_size,
sync_file,
input_tensor,
backprop_in_gather,
),
nprocs=world_size,
)
| EXA-1-master | exa/libraries/multimodal-main/tests/utils/test_distributed.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected
from torchmultimodal.models.late_fusion import LateFusion
from torchmultimodal.modules.fusions.concat_fusion import ConcatFusionModule
class TestLateFusion:
@pytest.fixture
def encoders(self):
return torch.nn.ModuleDict(
{"c1": torch.nn.Identity(), "c2": torch.nn.Identity()}
)
@pytest.fixture
def fusion_module(self):
return ConcatFusionModule()
@pytest.fixture
def head_module(self):
return torch.nn.Identity()
@pytest.fixture
def late_fusion(self, encoders, fusion_module, head_module):
return LateFusion(
encoders,
fusion_module,
head_module,
)
@pytest.fixture
def modalities_1(self):
return {
"c1": torch.Tensor(
[
[1, 0, 0.25, 0.75],
[0, 1, 0.6, 0.4],
]
),
"c2": torch.Tensor(
[
[3, 1, 0.8, 0.9],
[0.7, 2, 0.6, 0],
]
),
}
@pytest.fixture
def modalities_2(self):
return {
"c1": torch.Tensor(
[
[7, 0, 0.65],
[88, 5, 0.3],
]
),
"c2": torch.Tensor(
[
[8, 9, 0.8],
[0.74, 2, 0],
]
),
}
@pytest.fixture
def modalities_3(self):
return {
"c3": torch.Tensor(
[
[8, 0, 0.5, 0.7],
[1, 6, 0.6, 0.4],
]
),
}
def test_forward(self, late_fusion, modalities_1):
actual = late_fusion(modalities_1)
expected = torch.Tensor(
[[1, 0, 0.25, 0.75, 3, 1, 0.8, 0.9], [0, 1, 0.6, 0.4, 0.7, 2, 0.6, 0]]
)
assert_expected(actual, expected)
def test_script(self, late_fusion, modalities_2):
scripted_late_fusion = torch.jit.script(late_fusion)
actual = scripted_late_fusion(modalities_2)
expected = torch.Tensor([[7, 0, 0.65, 8, 9, 0.8], [88, 5, 0.3, 0.74, 2, 0]])
assert_expected(actual, expected)
def test_missing_key_in_modalities(self, late_fusion, modalities_3):
with pytest.raises(AssertionError):
late_fusion(modalities_3)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/test_late_fusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, assert_expected_namedtuple, set_rng_seed
from torch import nn
from torchmultimodal.models.vqvae import VQVAE
@pytest.fixture(autouse=True)
def random():
set_rng_seed(4)
@pytest.fixture
def num_embeddings():
return 4
@pytest.fixture
def embedding_dim():
return 2
@pytest.fixture
def embedding_weights():
return torch.tensor([[6.0, 12.0], [3.0, 9.0], [14.0, 28.0], [7.0, 21.0]])
@pytest.fixture
def encoder():
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.layer = nn.Linear(2, 2, bias=False)
self.layer.weight = nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]]))
def forward(self, x):
return self.layer(x)
def get_latent_shape(self, input_shape):
return input_shape # dummy method
return Encoder()
@pytest.fixture
def bad_encoder():
class Encoder(nn.Module):
def __init__(self):
super().__init__()
self.layer = nn.Linear(2, 2, bias=False)
self.layer.weight = nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]]))
def forward(self, x):
return self.layer(x)
return Encoder()
@pytest.fixture
def decoder():
dec = nn.Linear(2, 2, bias=False)
dec.weight = nn.Parameter(torch.tensor([[1.0, 2.0], [3.0, 4.0]]))
return dec
@pytest.fixture
def vqvae_builder(decoder, num_embeddings, embedding_dim, embedding_weights):
def _vqvae(encoder):
vqvae = VQVAE(encoder, decoder, num_embeddings, embedding_dim)
vqvae.codebook.embedding = embedding_weights
return vqvae.eval() # switch off embedding weights initialization
return _vqvae
@pytest.fixture
def indices():
return torch.tensor([[[1, 3], [0, 2]]]) # (b, d1, d2)
class TestVQVAE:
@pytest.fixture
def vqvae(self, vqvae_builder, encoder):
return vqvae_builder(encoder)
@pytest.fixture
def vqvae_bad_encoder(self, vqvae_builder, bad_encoder):
return vqvae_builder(bad_encoder)
@pytest.fixture
def vqvae_bad_codebook(self, vqvae_builder, encoder, mocker):
class BadCodebook(nn.Module):
def __init__(self, num_embeddings, embedding_dim):
super().__init__()
mock_codebook = mocker.patch(
"torchmultimodal.models.vqvae.Codebook", wraps=BadCodebook
)
return vqvae_builder(encoder), mock_codebook
@pytest.fixture
def x(self):
return torch.tensor(
[[[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]]]]
) # (b, c, d1, d2)
@pytest.fixture
def expected_decoded(self):
return torch.tensor(
[[[[17.0, 37.0], [34.0, 74.0]], [[51.0, 111.0], [68.0, 148.0]]]]
)
@pytest.fixture
def expected_encoded_flat(self):
return torch.tensor(
[[3.0, 9.0], [7.0, 21.0], [6.0, 12.0], [14.0, 28.0]] # (b x d1 x d2, c)
)
@pytest.fixture
def expected_quantized_flat(self):
return torch.tensor(
[
[3.0, 9.0],
[7.0, 21.0],
[6.0, 12.0],
[14.0, 28.0],
] # (b x d1 x d2, emb_dim)
)
@pytest.fixture
def expected_quantized(self):
return torch.tensor(
[
[[[3.0, 7.0], [6.0, 14.0]], [[9.0, 21.0], [12.0, 28.0]]]
] # (b, emb_dim, d1, d2)
)
@pytest.fixture
def expected_codebook_indices(self, indices):
return indices
@pytest.fixture
def expected_codebook_output(
self,
expected_encoded_flat,
expected_quantized_flat,
expected_codebook_indices,
expected_quantized,
):
return {
"encoded_flat": expected_encoded_flat,
"quantized_flat": expected_quantized_flat,
"codebook_indices": expected_codebook_indices,
"quantized": expected_quantized,
}
def test_encode(self, vqvae, x, expected_codebook_indices):
actual_codebook_indices = vqvae.encode(x)
assert_expected(actual_codebook_indices, expected_codebook_indices)
def test_encode_return_embeddings(
self, vqvae, x, expected_quantized, expected_codebook_indices
):
actual_codebook_indices, actual_quantized = vqvae.encode(
x, return_embeddings=True
)
assert_expected(actual_quantized, expected_quantized)
assert_expected(actual_codebook_indices, expected_codebook_indices)
def test_decode(self, vqvae, indices, expected_decoded):
actual_decoded = vqvae.decode(indices)
assert_expected(actual_decoded, expected_decoded)
def test_forward(self, vqvae, x, expected_decoded, expected_codebook_output):
actual = vqvae(x)
expected = {
"decoded": expected_decoded,
"codebook_output": expected_codebook_output,
}
assert_expected_namedtuple(actual, expected)
def test_lookup(self, vqvae, indices):
actual = vqvae.lookup(indices)
expected = torch.tensor(
[[[[3.0, 9.0], [7.0, 21.0]], [[6.0, 12.0], [14.0, 28.0]]]]
)
assert_expected(actual, expected)
def test_latent_shape(self, vqvae):
actual = vqvae.latent_shape(input_shape=(1, 2, 3))
expected = (1, 2, 3)
assert_expected(actual, expected)
def test_latent_shape_bad_encoder(self, vqvae_bad_encoder):
with pytest.raises(AttributeError):
vqvae_bad_encoder.latent_shape(input_shape=(1, 2, 3))
def test_lookup_bad_codebook(self, vqvae_bad_codebook, indices):
vqvae, mock_codebook = vqvae_bad_codebook
with pytest.raises(AttributeError):
vqvae.lookup(indices)
mock_codebook.assert_called_once()
def test_num_embeddings(self, vqvae, num_embeddings):
actual = vqvae.num_embeddings
expected = num_embeddings
assert_expected(actual, expected)
def test_embedding_dim(self, vqvae, embedding_dim):
actual = vqvae.embedding_dim
expected = embedding_dim
assert_expected(actual, expected)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/test_vqvae.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, assert_expected_namedtuple, set_rng_seed
from torchmultimodal.models.video_vqvae import (
AttentionResidualBlock,
preprocess_int_conv_params,
video_vqvae,
VideoDecoder,
VideoEncoder,
)
@pytest.fixture(autouse=True)
def random():
set_rng_seed(4)
@pytest.fixture(scope="module")
def params():
in_channel_dims = (2, 2)
out_channel_dims = (2, 2)
kernel_sizes = ((2, 2, 2), (2, 2, 2))
strides = ((1, 1, 1), (1, 1, 1))
return in_channel_dims, out_channel_dims, kernel_sizes, strides
@pytest.fixture(scope="module")
def input_tensor():
return torch.ones(1, 2, 2, 2, 2)
class TestAttentionResidualBlock:
def test_hidden_dim_assertion(self):
with pytest.raises(ValueError):
_ = AttentionResidualBlock(1)
def test_forward(self):
block = AttentionResidualBlock(4)
x = 2 * torch.ones(1, 4, 2, 2, 2)
actual = block(x)
expected = torch.tensor(
[
[
[
[[2.4492, 2.4492], [2.4492, 2.4492]],
[[2.4492, 2.4492], [2.4492, 2.4492]],
],
[
[[2.3055, 2.3055], [2.3055, 2.3055]],
[[2.3055, 2.3055], [2.3055, 2.3055]],
],
[
[[1.9071, 1.9071], [1.9071, 1.9071]],
[[1.9071, 1.9071], [1.9071, 1.9071]],
],
[
[[1.7587, 1.7587], [1.7587, 1.7587]],
[[1.7587, 1.7587], [1.7587, 1.7587]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
class TestVideoEncoder:
@pytest.fixture
def encoder(self, params):
in_channel_dims, _, kernel_sizes, _ = params
def get_encoder(strides):
enc = VideoEncoder(
in_channel_dims=in_channel_dims,
kernel_sizes=kernel_sizes,
strides=strides,
output_dim=2,
n_res_layers=1,
attn_hidden_dim=2,
)
enc.eval()
return enc
return get_encoder
@pytest.fixture
def uneven_strides(self):
return ((2, 2, 2), (1, 2, 2))
@pytest.fixture
def big_input(self):
return torch.ones(1, 2, 4, 8, 8)
def test_forward(self, input_tensor, encoder, params):
strides = params[-1]
model = encoder(strides)
actual = model(input_tensor)
expected = torch.tensor(
[
[
[
[[-0.6480, -0.5961], [-0.6117, -0.6640]],
[[-0.7177, -0.7569], [-0.5477, -0.5710]],
],
[
[[-0.1906, -0.1636], [-0.2265, -0.1501]],
[[-0.1730, -0.1398], [-0.2598, -0.1510]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_latent_shape(self, big_input, encoder, uneven_strides):
downsampler = encoder(uneven_strides)
output = downsampler(big_input)
actual = output.shape[2:]
expected = downsampler.get_latent_shape(big_input.shape[2:])
assert_expected(actual, expected)
class TestVideoDecoder:
@pytest.fixture
def decoder(self, params):
_, out_channel_dims, kernel_sizes, strides = params
dec = VideoDecoder(
out_channel_dims=out_channel_dims,
kernel_sizes=kernel_sizes,
strides=strides,
input_dim=2,
n_res_layers=1,
attn_hidden_dim=2,
)
dec.eval()
return dec
def test_forward(self, input_tensor, decoder):
actual = decoder(input_tensor)
expected = torch.tensor(
[
[
[
[[-0.2129, -0.1894], [-0.2358, -0.2302]],
[[-0.2012, -0.1757], [-0.2264, -0.2067]],
],
[
[[-0.1252, -0.1220], [-0.1235, -0.1280]],
[[-0.1502, -0.1264], [-0.1551, -0.1490]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
class TestVideoVQVAE:
@pytest.fixture
def vv(self):
model = video_vqvae(
in_channel_dim=2,
encoder_hidden_dim=2,
encoder_kernel_size=2,
encoder_stride=1,
encoder_n_layers=2,
n_res_layers=1,
attn_hidden_dim=2,
num_embeddings=8,
embedding_dim=2,
decoder_hidden_dim=2,
decoder_kernel_size=2,
decoder_stride=1,
decoder_n_layers=2,
)
model.eval()
return model
@pytest.fixture
def expected_decoded(self):
return torch.tensor(
[
[
[
[[0.1547, 0.1720], [0.1354, 0.1029]],
[[0.0828, 0.1086], [0.0837, 0.0637]],
],
[
[[0.1914, 0.0667], [0.1442, -0.0180]],
[[0.0793, -0.0574], [0.0635, -0.0776]],
],
]
]
)
@pytest.fixture
def expected_codebook_output(self):
return {
"encoded_flat": torch.tensor(
[
[-0.6480, -0.1906],
[-0.5961, -0.1636],
[-0.6117, -0.2265],
[-0.6640, -0.1501],
[-0.7177, -0.1730],
[-0.7569, -0.1398],
[-0.5477, -0.2598],
[-0.5710, -0.1510],
]
),
"quantized_flat": torch.tensor(
[
[-0.1801, 0.2553],
[-0.1801, 0.2553],
[-0.1801, 0.2553],
[-0.1801, 0.2553],
[-0.1801, 0.2553],
[-0.1801, 0.2553],
[-0.1801, 0.2553],
[-0.1801, 0.2553],
]
),
"codebook_indices": torch.tensor(
[
[
[[4, 4], [4, 4]],
[[4, 4], [4, 4]],
],
]
),
"quantized": torch.tensor(
[
[
[
[[-0.1801, -0.1801], [-0.1801, -0.1801]],
[[-0.1801, -0.1801], [-0.1801, -0.1801]],
],
[
[[0.2553, 0.2553], [0.2553, 0.2553]],
[[0.2553, 0.2553], [0.2553, 0.2553]],
],
]
]
),
}
@pytest.fixture
def indices(self):
return torch.tensor(
[
[
[[4, 4], [4, 4]],
[[4, 4], [4, 4]],
],
]
)
def test_encode(self, vv, input_tensor, indices):
actual_codebook_indices = vv.encode(input_tensor)
expected_codebook_indices = indices
assert_expected(actual_codebook_indices, expected_codebook_indices)
def test_decode(self, vv, indices, expected_decoded):
actual = vv.decode(indices)
expected = expected_decoded
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_forward(
self, vv, input_tensor, expected_decoded, expected_codebook_output
):
actual = vv(input_tensor)
expected = {
"decoded": expected_decoded,
"codebook_output": expected_codebook_output,
}
assert_expected_namedtuple(actual, expected, rtol=0, atol=1e-4)
def test_preprocess_int_conv_params():
channels = (3, 3, 3)
kernel = 2
stride = 1
expected_kernel = torch.tensor(((2, 2, 2), (2, 2, 2), (2, 2, 2)))
expected_stride = torch.tensor(((1, 1, 1), (1, 1, 1), (1, 1, 1)))
actual_kernel, actual_stride = preprocess_int_conv_params(channels, kernel, stride)
actual_kernel = torch.tensor(actual_kernel)
actual_stride = torch.tensor(actual_stride)
assert_expected(actual_kernel, expected_kernel)
assert_expected(actual_stride, expected_stride)
actual_kernel = preprocess_int_conv_params(channels, kernel_sizes=kernel)
actual_kernel = torch.tensor(actual_kernel)
assert_expected(actual_kernel, expected_kernel)
actual_stride = preprocess_int_conv_params(channels, strides=stride)
actual_stride = torch.tensor(actual_stride)
assert_expected(actual_stride, expected_stride)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/test_video_vqvae.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/models/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import pytest
import torch
from tests.test_utils import assert_expected
from torch import nn, Tensor
from torchmultimodal.models.late_fusion import LateFusion
from torchmultimodal.models.two_tower import TwoTower
from torchmultimodal.modules.fusions.concat_fusion import ConcatFusionModule
@pytest.fixture
def tower_fusion():
class Concat(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: List[Tensor]) -> Tensor:
return torch.cat(x, dim=-1)
return Concat()
class TestTwoTower:
@pytest.fixture
def tower_1(self):
return LateFusion(
{"c1": nn.Identity(), "c2": nn.Identity()},
ConcatFusionModule(),
nn.Identity(),
)
@pytest.fixture
def tower_2(self):
return LateFusion(
{"c3": nn.Identity(), "c4": nn.Identity()},
ConcatFusionModule(),
nn.Identity(),
)
@pytest.fixture
def batch_size(self):
return 3
@pytest.fixture
def data(self, batch_size):
return {
"c1": torch.rand(batch_size, 8),
"c2": torch.rand(batch_size, 16),
"c3": torch.rand(batch_size, 4),
"c4": torch.rand(batch_size, 12),
}
@pytest.fixture
def two_tower(self, tower_1, tower_2, tower_fusion):
return TwoTower(
tower_id_to_tower={"tower_1": tower_1, "tower_2": tower_2},
tower_fusion=tower_fusion,
)
@pytest.fixture
def shared_two_tower(self, tower_1, tower_fusion):
return TwoTower(
tower_id_to_tower={"tower_1": tower_1, "tower_2": tower_1},
tower_fusion=tower_fusion,
shared_tower_id_to_channel_mapping={"tower_2": {"c1": "c3", "c2": "c4"}},
)
@pytest.fixture
def shared_two_tower_scripting(self, tower_1, tower_fusion):
return TwoTower(
tower_id_to_tower={"tower_1": tower_1, "tower_2": tower_1},
tower_fusion=tower_fusion,
shared_tower_id_to_channel_mapping={"tower_2": {"c3": "c1", "c4": "c2"}},
)
def test_two_tower(self, two_tower, data, batch_size):
out = two_tower(data)
assert_expected(out[0].size(), (batch_size, 40))
def test_shared_two_tower(self, shared_two_tower, data, batch_size):
out = shared_two_tower(data)
assert_expected(out[0].size(), (batch_size, 40))
def test_two_tower_scripting(self, two_tower, shared_two_tower_scripting):
torch.jit.script(two_tower)
torch.jit.script(shared_two_tower_scripting)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/test_two_tower.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, assert_expected_namedtuple, set_rng_seed
from torch import nn
from torch.nn import functional as F
from torchmultimodal.models.gpt import (
MultimodalGPT,
MultimodalGPTOutput,
MultimodalTransformerDecoder,
RightShift,
TransformerDecoder,
TransformerDecoderLayer,
TransformerDecoderOutput,
TransformerLayerOutput,
)
from torchmultimodal.utils.common import shift_dim
@pytest.fixture(autouse=True)
def set_seed():
set_rng_seed(4)
@pytest.fixture
def d_model():
return 4
@pytest.fixture
def emb_dim():
return 5
@pytest.fixture
def num_emb():
return 6
@pytest.fixture
def latent_shape():
# the product of dims should equal out_seq_len
return (1, 1, 4)
@pytest.fixture
def in_seq_len():
return 3
@pytest.fixture
def out_seq_len():
return 4
@pytest.fixture
def n_head():
return 2
@pytest.fixture
def num_in_tokens():
return 4
@pytest.fixture
def num_out_tokens():
return 6
@pytest.fixture
def in_tokens(in_seq_len):
return torch.arange(in_seq_len).unsqueeze(0) # (b, seq_len)
@pytest.fixture
def out_tokens(out_seq_len):
return torch.arange(out_seq_len).unsqueeze(0) # (b, seq_len)
@pytest.fixture
def in_modality(in_seq_len, d_model):
return torch.rand(1, in_seq_len, d_model) # (b, seq_len, d_model)
@pytest.fixture
def out_modality(out_seq_len, d_model):
return torch.rand(1, out_seq_len, d_model) # (b, seq_len, d_model)
@pytest.fixture
def decoder_input(d_model):
return torch.rand(1, 3, d_model) # (b, seq_len, d_model)
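# Lower-triangular (causal) attention mask: each query position can attend only to
# itself and to earlier key positions.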
@pytest.fixture
def attn_mask():
def _attn_mask(q_seq_len, k_seq_len=None):
if k_seq_len is None:
k_seq_len = q_seq_len
return torch.tril(torch.ones(q_seq_len, k_seq_len)) # (q_seq_len, k_seq_len)
return _attn_mask
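# Head mask that fully masks out the first attention head and leaves the remaining
# n_head - 1 heads active.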
@pytest.fixture
def head_mask(n_head):
def _head_mask(q_seq_len, k_seq_len=None):
if k_seq_len is None:
k_seq_len = q_seq_len
masked = torch.zeros(1, q_seq_len, k_seq_len)
unmasked = torch.ones(n_head - 1, q_seq_len, k_seq_len)
return torch.cat((masked, unmasked), dim=0) # (h, q_seq_len, k_seq_len)
return _head_mask
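# Logits mask of shape (total_seq_len, num_tokens): input-modality positions may only emit
# input-vocabulary tokens, and output-modality positions only output-vocabulary tokens.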
@pytest.fixture
def logits_mask(in_seq_len, out_seq_len, num_in_tokens, num_out_tokens):
total_seq_len = in_seq_len + out_seq_len
num_tokens = num_in_tokens + num_out_tokens
logits_mask = torch.ones(total_seq_len, num_tokens)
logits_mask[in_seq_len:, :num_in_tokens] = 0
logits_mask[:in_seq_len, num_in_tokens:] = 0
return logits_mask
@pytest.fixture
def num_layers():
return 1
@pytest.fixture
def right_shift(d_model):
return RightShift(d_model)
@pytest.fixture
def in_projection(d_model, emb_dim):
return nn.Linear(emb_dim, d_model)
@pytest.fixture
def out_projection(d_model, emb_dim):
return nn.Linear(emb_dim, d_model)
@pytest.fixture
def in_pos_emb(in_seq_len, d_model):
return nn.Embedding(in_seq_len, d_model)
@pytest.fixture
def out_pos_emb(out_seq_len, d_model):
return nn.Embedding(out_seq_len, d_model)
@pytest.fixture
def decoder_layer(n_head, d_model):
return TransformerDecoderLayer(d_model, n_head=n_head).eval()
@pytest.fixture
def decoder(decoder_layer, num_layers):
return TransformerDecoder(decoder_layer, num_layers)
@pytest.fixture
def mm_decoder(in_pos_emb, out_pos_emb, decoder, right_shift):
return MultimodalTransformerDecoder(in_pos_emb, out_pos_emb, decoder, right_shift)
@pytest.fixture
def tokenizer(num_emb, emb_dim):
class DummyTokenizer(nn.Module):
def __init__(self, num_emb, emb_dim):
super().__init__()
# encoder and decoder do not enter either the training or the token
# generation paths so we do not test their actual logic but only
# the interfaces.
self.encoder = nn.Identity()
self.decoder = nn.Identity()
self.embedding = nn.Parameter(
torch.arange(num_emb * emb_dim, dtype=torch.float).reshape(
num_emb, emb_dim
)
)
def encode(self, x):
return self.encoder(x)
def decode(self, token_ids):
return self.decoder(shift_dim(self.lookup(token_ids), -1, 1))
def lookup(self, token_ids):
return F.embedding(token_ids, self.embedding)
return DummyTokenizer(num_emb, emb_dim)
@pytest.fixture
def gpt(
d_model,
num_in_tokens,
num_out_tokens,
mm_decoder,
latent_shape,
tokenizer,
in_projection,
out_projection,
):
def _gpt(in_tokenizer=tokenizer, out_tokenizer=tokenizer, use_gpt_init=False):
return MultimodalGPT(
d_model=d_model,
num_in_tokens=num_in_tokens,
num_out_tokens=num_out_tokens,
latent_shape=latent_shape,
in_tokenizer=in_tokenizer,
out_tokenizer=out_tokenizer,
mm_decoder=mm_decoder,
in_projection=in_projection,
out_projection=out_projection,
norm_layer=None,
use_gpt_init=use_gpt_init,
).eval()
return _gpt
def get_pos_ids(x):
b, seq_len, _ = x.shape
pos_ids = torch.arange(seq_len, dtype=torch.long, device=x.device)
return pos_ids[None, :] # (1, seq_len)
class TestMultimodalGPT:
def test_tokenizers_missing_methods(self, gpt):
class BadTokenizer(nn.Module):
def __init__(self):
super().__init__()
with pytest.raises(AttributeError):
gpt(in_tokenizer=BadTokenizer())
with pytest.raises(AttributeError):
gpt(out_tokenizer=BadTokenizer())
def test_initialize_parameters(self, gpt, mocker):
        # Testing the mean and std of the initialized weights requires a large
        # number of samples to be statistically stable. Here we just test whether
# the method in question has been called to avoid test flakiness.
mock_init = mocker.patch("torchmultimodal.models.gpt.Tensor.normal_")
gpt = gpt(use_gpt_init=True)
mock_init.assert_called()
def test_encode_invalid_modality(self, gpt):
gpt = gpt()
with pytest.raises(ValueError):
gpt.encode(torch.randn(1, 2, 3), modality="abc")
def test_decode_tokens_wrong_shape(self, gpt):
bad_out_tokens = torch.arange(3) # seq_len no batch dim
gpt = gpt()
with pytest.raises(ValueError):
gpt.decode(bad_out_tokens)
def test_decode_tokens_reshape(self, gpt, out_tokens):
gpt = gpt()
actual = gpt.decode(out_tokens)
expected_shape = torch.Size([1, 5, 1, 1, 4]) # (b, emb_dim, *latent_shape)
assert_expected(actual.shape, expected_shape)
def test_lookup_invalid_modality(self, gpt):
gpt = gpt()
token_ids = torch.arange(3).unsqueeze(0)
with pytest.raises(ValueError):
gpt.lookup(token_ids, modality="abc")
def test_lookup_in_modality(self, gpt, in_tokens):
gpt = gpt()
actual = gpt.lookup(in_tokens, "in")
expected = torch.tensor(
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
],
]
)
assert_expected(actual, expected)
def test_lookup_out_modality(self, gpt, out_tokens):
gpt = gpt()
actual = gpt.lookup(out_tokens, "out")
expected = torch.tensor(
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
],
]
)
assert_expected(actual, expected)
def test_fwd_bad_input(self, gpt):
gpt = gpt()
with pytest.raises(ValueError):
gpt.fwd()
def test_fwd_for_generation(self, gpt, in_tokens, d_model, n_head, mocker):
"""Test autoregressive decoding for one step"""
gpt = gpt()
mock_right_shift = mocker.patch.object(
gpt.mm_decoder.right_shift,
"forward",
wraps=gpt.mm_decoder.right_shift.forward,
)
b, in_seq_len = in_tokens.shape
# learn the key/value representation from the full input modality sequence
with torch.no_grad():
actual = gpt.fwd(
in_tokens=in_tokens, use_cache=True, causal=True, right_shift=True
)
assert isinstance(actual, TransformerDecoderOutput)
# check that the key/value representation has been learnt
for layer_past_kv in actual.past_key_values:
assert_expected(
layer_past_kv["k"].shape,
torch.Size([1, 2, 3, 2]), # (b, n_head, in_seq_len, d_model // n_head)
)
assert_expected(
layer_past_kv["v"].shape,
torch.Size([1, 2, 3, 2]),
)
# right shift should be switched on to prepend SOS to input modality sequence
mock_right_shift.assert_called_once()
mock_right_shift.reset_mock()
# generate out_modality for one step
# take the last in_modality token as the starting token for out_modality generation
decode_step = 0
with torch.no_grad():
actual = gpt.fwd(
out_tokens=in_tokens[:, -1:],
out_pos_ids=torch.tensor([decode_step]).unsqueeze(0).repeat(b, 1),
use_cache=True,
causal=True,
)
# check that the key/value representation has increased by 1 unit
for layer_past_kv in actual.past_key_values:
assert_expected(
layer_past_kv["k"].shape,
torch.Size(
[1, 2, 4, 2]
), # (b, n_head, in_seq_len + 1, d_model // n_head)
)
assert_expected(
layer_past_kv["v"].shape,
torch.Size([1, 2, 4, 2]),
)
# right shift should be switched off as the "SOS" token for output modality
# is the last token of in_modality
mock_right_shift.assert_not_called()
def test_forward(
self,
gpt,
in_tokens,
out_tokens,
attn_mask,
head_mask,
):
gpt = gpt()
b, in_seq_len = in_tokens.shape
b, out_seq_len = out_tokens.shape
attn_mask = attn_mask(in_seq_len + out_seq_len)
head_mask = head_mask(in_seq_len + out_seq_len)
actual = gpt(
in_tokens=in_tokens,
out_tokens=out_tokens,
attn_mask=attn_mask,
head_mask=head_mask,
use_cache=True,
causal=True,
right_shift=True,
)
assert isinstance(actual, MultimodalGPTOutput)
expected = {
"decoder_output": {
"last_hidden_states": (
torch.Size([1, 7, 4]), # (b, seq_len, d_model)
64.5348,
),
"hidden_states": None,
"attention_weights": None,
"past_key_values": (
(
{
"k": (torch.Size([1, 2, 7, 2]), 8.3626),
"v": (torch.Size([1, 2, 7, 2]), 3.4256),
}
),
                ),  # (num_layers, key/value, (b, n_head, seq_len, d_model // n_head))
},
"logits": (torch.Size([1, 7, 10]), 0.0), # (b, seq_len, tokens)
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_forward_logits_mask(
self,
gpt,
in_tokens,
out_tokens,
attn_mask,
head_mask,
logits_mask,
):
gpt = gpt()
b, in_seq_len = in_tokens.shape
b, out_seq_len = out_tokens.shape
attn_mask = attn_mask(in_seq_len + out_seq_len)
head_mask = head_mask(in_seq_len + out_seq_len)
out = gpt(
in_tokens=in_tokens,
out_tokens=out_tokens,
attn_mask=attn_mask,
head_mask=head_mask,
use_cache=True,
causal=True,
right_shift=True,
logits_mask=logits_mask,
)
assert isinstance(out, MultimodalGPTOutput)
actual = out.logits # (b, seq_len, num_tokens)
max_neg_value = -torch.finfo(torch.float32).max
        # assert each quadrant of the logits matrix (b, total_seq_len, num_total_tokens)
assert_expected(
actual[:, :3, :4], torch.zeros(1, 3, 4)
) # (b, in_seq_len, num_in_tokens)
assert_expected(
actual[:, :3, 4:], torch.ones(1, 3, 6) * max_neg_value
) # (b, in_seq_len, num_out_tokens)
assert_expected(
actual[:, 3:, :4], torch.ones(1, 4, 4) * max_neg_value
) # (b, out_seq_len, num_in_tokens)
assert_expected(
actual[:, 3:, 4:], torch.zeros(1, 4, 6)
) # (b, out_seq_len, num_out_tokens)
class TestMultimodalTransformerDecoder:
def test_bad_input(self, mm_decoder):
with pytest.raises(ValueError):
mm_decoder()
def test_forward_in_modality(self, mm_decoder, in_modality):
actual = mm_decoder(
in_modality=in_modality, in_pos_ids=get_pos_ids(in_modality)
)
expected = {
"last_hidden_states": (
torch.Size([1, 3, 4]),
0.2222,
), # (b, in_seq_len, d_model)
"hidden_states": None,
"attention_weights": None,
"past_key_values": None,
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_forward_out_modality(self, mm_decoder, out_modality):
actual = mm_decoder(
out_modality=out_modality, out_pos_ids=get_pos_ids(out_modality)
)
expected = {
"last_hidden_states": (
torch.Size([1, 4, 4]),
5.2093,
), # (b, out_seq_len, d_model)
"hidden_states": None,
"attention_weights": None,
"past_key_values": None,
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_forward_two_modality(self, mm_decoder, in_modality, out_modality):
actual = mm_decoder(
in_modality=in_modality,
out_modality=out_modality,
in_pos_ids=get_pos_ids(in_modality),
out_pos_ids=get_pos_ids(out_modality),
)
expected = {
"last_hidden_states": (
torch.Size([1, 7, 4]),
7.9519,
), # (b, in_seq_len + out_seq_len, d_model)
"hidden_states": None,
"attention_weights": None,
"past_key_values": None,
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_forward_eval_right_shift_on(
self, mm_decoder, in_modality, out_modality, mocker
):
"""Test right shift is switched on during eval mode"""
mock_right_shift = mocker.patch.object(
mm_decoder.right_shift, "forward", wraps=mm_decoder.right_shift.forward
)
mm_decoder.eval()
actual = mm_decoder(
in_modality=in_modality,
out_modality=out_modality,
in_pos_ids=get_pos_ids(in_modality),
out_pos_ids=get_pos_ids(out_modality),
right_shift=True,
)
mock_right_shift.assert_called_once()
expected = {
"last_hidden_states": (
torch.Size([1, 7, 4]),
7.9519,
), # (b, in_seq_len + out_seq_len, d_model)
"hidden_states": None,
"attention_weights": None,
"past_key_values": None,
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_forward_eval_right_shift_off(
self, mm_decoder, in_modality, out_modality, mocker
):
"""Test right shift is switched off during eval mode"""
mock_right_shift = mocker.patch.object(
mm_decoder.right_shift, "forward", wraps=mm_decoder.right_shift.forward
)
mm_decoder.eval()
actual = mm_decoder(
in_modality=in_modality,
out_modality=out_modality,
in_pos_ids=get_pos_ids(in_modality),
out_pos_ids=get_pos_ids(out_modality),
)
mock_right_shift.assert_not_called()
expected = {
"last_hidden_states": (
torch.Size([1, 7, 4]),
10.1681,
), # (b, in_seq_len + out_seq_len, d_model)
"hidden_states": None,
"attention_weights": None,
"past_key_values": None,
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_bad_pos_ids(self, mm_decoder, in_modality, in_seq_len):
in_pos_ids = torch.arange(
in_seq_len + 1, dtype=torch.long, device=in_modality.device
)[None, :]
with pytest.raises(ValueError):
mm_decoder._norm_pos_ids(in_modality, in_pos_ids)
def test_optional_pos_ids(self, mm_decoder, in_modality):
actual = mm_decoder._norm_pos_ids(in_modality)
expected = get_pos_ids(in_modality)
assert_expected(actual, expected)
class TestTransformerDecoder:
def test_forward_mask_extended(
self, decoder, decoder_input, attn_mask, head_mask, num_layers
):
b, seq_len, _ = decoder_input.shape
attn_mask = attn_mask(seq_len).unsqueeze(0) # add batch dim
head_mask = head_mask(seq_len).unsqueeze(0)
actual = decoder(decoder_input, attn_mask, head_mask)
assert isinstance(actual, TransformerDecoderOutput)
assert_expected(actual.last_hidden_states.shape, torch.Size([1, 3, 4]))
def test_forward(self, decoder, decoder_input, attn_mask, head_mask, num_layers):
b, seq_len, _ = decoder_input.shape
attn_mask = attn_mask(seq_len)
head_mask = head_mask(seq_len)
actual = decoder(decoder_input, attn_mask, head_mask)
assert isinstance(actual, TransformerDecoderOutput)
assert_expected(actual.last_hidden_states.shape, torch.Size([1, 3, 4]))
def test_forward_additional_output(self, decoder, decoder_input, num_layers):
actual = decoder(
decoder_input,
use_cache=True,
return_attn_weights=True,
return_hidden_states=True,
)
assert isinstance(actual, TransformerDecoderOutput)
assert_expected(actual.last_hidden_states.shape, torch.Size([1, 3, 4]))
assert_expected(
len(actual.hidden_states), num_layers + 1
) # +1 to include the input hidden_states
assert_expected(len(actual.attention_weights), num_layers)
assert_expected(len(actual.past_key_values), num_layers)
class TestTransformerDecoderLayer:
def test_forward(self, decoder_layer, decoder_input):
actual = decoder_layer(decoder_input)
assert isinstance(actual, TransformerLayerOutput)
expected = {
"hidden_states": (torch.Size([1, 3, 4]), 5.1956), # (b, seq_len, d_model)
"attention_weights": None,
"past_key_values": None,
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_forward_masked(self, decoder_layer, decoder_input, attn_mask, head_mask):
b, seq_len, _ = decoder_input.shape
attn_mask = attn_mask(seq_len)
head_mask = head_mask(seq_len)
actual = decoder_layer(decoder_input, attn_mask, head_mask)
assert isinstance(actual, TransformerLayerOutput)
expected = {
"hidden_states": (torch.Size([1, 3, 4]), 7.0397), # (b, seq_len, seq_len)
"attention_weights": None,
"past_key_values": None,
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
def test_forward_additional_output(self, decoder_layer, decoder_input):
actual = decoder_layer(decoder_input, use_cache=True, return_attn_weights=True)
assert isinstance(actual, TransformerLayerOutput)
expected = {
"hidden_states": (torch.Size([1, 3, 4]), 5.1956), # (b, seq_len, seq_len)
"attention_weights": (
torch.Size([1, 2, 3, 3]),
6.0,
), # (b, h, seq_len, seq_len)
"past_key_values": {
"k": ([1, 2, 3, 2], 4.8075),
"v": ([1, 2, 3, 2], -5.6613),
}, # (b, h, seq_len, d_model//h)
}
assert_expected_namedtuple(actual, expected, rtol=1e-5, atol=1e-4)
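# RightShift shifts the sequence right by one step, dropping the last position and filling the
# first position with a learned start-of-sequence embedding, hence only row 0 of the output
# differs from the all-ones input below.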
def test_right_shift(right_shift, d_model):
x = torch.ones(1, 3, d_model) # (b, seq_len, d_model)
actual = right_shift(x)
expected = torch.tensor(
[
[
[-0.0321, 0.0046, 0.0448, 0.0169],
[1.0000, 1.0000, 1.0000, 1.0000],
[1.0000, 1.0000, 1.0000, 1.0000],
]
]
)
assert_expected(actual, expected, rtol=1e-5, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/test_gpt.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torchmultimodal.models.omnivore as omnivore
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.utils.common import get_current_device
@pytest.fixture(autouse=True)
def device():
set_rng_seed(42)
return get_current_device()
@pytest.fixture()
def omnivore_swin_t_model(device):
return omnivore.omnivore_swin_t().to(device)
@pytest.fixture()
def omnivore_swin_s_model(device):
return omnivore.omnivore_swin_s().to(device)
@pytest.fixture()
def omnivore_swin_b_model(device):
return omnivore.omnivore_swin_b().to(device)
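# Omnivore uses a shared trunk with a separate classification head per input_type:
# a 1000-way head for "image", a 19-way head for "rgbd", and a 400-way head for "video",
# matching the output sizes asserted below.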
def test_omnivore_swin_t_forward(omnivore_swin_t_model, device):
model = omnivore_swin_t_model
image = torch.randn((1, 3, 1, 112, 112), device=device) # B C D H W
image_score = model(image, input_type="image")
assert_expected(image_score.size(), torch.Size((1, 1000)))
assert_expected(
image_score.abs().sum(), torch.tensor(184.01417), rtol=1e-3, atol=1e-3
)
rgbd = torch.randn((1, 4, 1, 112, 112), device=device)
rgbd_score = model(rgbd, input_type="rgbd")
assert_expected(rgbd_score.size(), torch.Size((1, 19)))
assert_expected(rgbd_score.abs().sum(), torch.tensor(3.60813), rtol=1e-3, atol=1e-3)
video = torch.randn((1, 3, 4, 112, 112), device=device)
video_score = model(video, input_type="video")
assert_expected(video_score.size(), torch.Size((1, 400)))
assert_expected(
video_score.abs().sum(), torch.tensor(110.70048), rtol=1e-3, atol=1e-3
)
def test_omnivore_swin_s_forward(omnivore_swin_s_model, device):
model = omnivore_swin_s_model
image = torch.randn((1, 3, 1, 112, 112), device=device) # B C D H W
image_score = model(image, input_type="image")
assert_expected(image_score.size(), torch.Size((1, 1000)))
assert_expected(
image_score.abs().sum(), torch.tensor(239.73104), rtol=1e-3, atol=1e-3
)
rgbd = torch.randn((1, 4, 1, 112, 112), device=device)
rgbd_score = model(rgbd, input_type="rgbd")
assert_expected(rgbd_score.size(), torch.Size((1, 19)))
assert_expected(rgbd_score.abs().sum(), torch.tensor(5.80919), rtol=1e-3, atol=1e-3)
video = torch.randn((1, 3, 4, 112, 112), device=device)
video_score = model(video, input_type="video")
assert_expected(video_score.size(), torch.Size((1, 400)))
assert_expected(
video_score.abs().sum(), torch.tensor(136.49894), rtol=1e-3, atol=1e-3
)
def test_omnivore_swin_b_forward(omnivore_swin_b_model, device):
model = omnivore_swin_b_model
image = torch.randn((1, 3, 1, 112, 112), device=device) # B C D H W
image_score = model(image, input_type="image")
assert_expected(image_score.size(), torch.Size((1, 1000)))
assert_expected(
image_score.abs().sum(), torch.tensor(278.06488), rtol=1e-3, atol=1e-3
)
rgbd = torch.randn((1, 4, 1, 112, 112), device=device)
rgbd_score = model(rgbd, input_type="rgbd")
assert_expected(rgbd_score.size(), torch.Size((1, 19)))
assert_expected(rgbd_score.abs().sum(), torch.tensor(4.52186), rtol=1e-3, atol=1e-3)
video = torch.randn((1, 3, 4, 112, 112), device=device)
video_score = model(video, input_type="video")
assert_expected(video_score.size(), torch.Size((1, 400)))
assert_expected(
video_score.abs().sum(), torch.tensor(138.22859), rtol=1e-3, atol=1e-3
)
def test_omnivore_forward_wrong_input_type(omnivore_swin_t_model, device):
model = omnivore_swin_t_model
image = torch.randn((1, 3, 1, 112, 112), device=device) # B C D H W
with pytest.raises(AssertionError, match="Unsupported input_type: _WRONG_TYPE_.+"):
_ = model(image, input_type="_WRONG_TYPE_")
| EXA-1-master | exa/libraries/multimodal-main/tests/models/test_omnivore.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.video_gpt import video_gpt, video_vqvae
@pytest.fixture(autouse=True)
def set_seed():
return set_rng_seed(4)
_model_params = {
"video_gpt": {
"input_shape": (16, 64, 64),
"latent_shape": (8, 32, 32),
"d_model": 576,
"n_head": 4,
"dropout": 0.2,
"attn_dropout": 0.3,
"num_decoder_layers": 16,
"use_gpt_init": True,
},
"video_vqvae": {
"input_shape": (16, 64, 64),
"conv_filter_sizes": ((4, 4, 4),),
"conv_filter_strides": ((2, 2, 2),),
"encoder_filter_size": (3, 3, 3),
"encoder_filter_stride": (1, 1, 1),
"in_channel_dim": 3,
"encoder_hidden_dim": 240,
"n_res_layers": 4,
"attn_hidden_dim": 240,
"num_embeddings": 1024,
"embedding_dim": 256,
"decoder_hidden_dim": 240,
},
}
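# Note: with a single stride-(2, 2, 2) conv block, the (16, 64, 64) input is downsampled to the
# (8, 32, 32) latent grid, i.e. 8 * 32 * 32 = 8192 tokens per video (see test_encode below).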
class TestVideoGPT:
_model_name = "video_gpt"
@pytest.fixture
def model_params(self):
return _model_params.get(self._model_name, {})
@pytest.fixture
def model_fn(self):
return video_gpt
def test_encode(self, model_fn, model_params):
model = model_fn(**model_params)
model.eval()
x = torch.randn((1, 3, *model_params["input_shape"])) # (b, c, *input_shape)
actual = model.encode(x, "in")
assert_expected(actual.shape, (1, 8192))
assert_expected(actual.sum().item(), 6678187)
def test_decode(self, model_fn, model_params):
model = model_fn(**model_params)
model.eval()
latent_seq_len = torch.prod(torch.tensor(model_params["latent_shape"])).item()
x = torch.randint(0, 10, (1, latent_seq_len)) # tokens
actual = model.decode(x)
assert_expected(actual.shape, (1, 3, 16, 64, 64))
assert_expected(actual.sum().item(), 14629.2432, rtol=1e-5, atol=1e-4)
@pytest.mark.parametrize(
"modality, expected_shape, expected_sum",
[("in", (1, 2, 256), 38.8214), ("out", (1, 2, 256), -23.4659)],
)
def test_lookup(
self, model_fn, model_params, modality, expected_shape, expected_sum
):
model = model_fn(**model_params)
model.eval()
x = torch.tensor([[1, 2]]) # tokens
actual = model.lookup(x, modality)
assert_expected(actual.shape, expected_shape)
assert_expected(actual.sum().item(), expected_sum, rtol=1e-5, atol=1e-4)
def test_forward(self, model_fn, model_params):
model = model_fn(**model_params)
model.eval()
n_head = model_params["n_head"]
x = torch.tensor([[1, 2, 3, 4]]) # (b, in_seq_len)
y = torch.tensor([[5, 6, 7]]) # (b, out_seq_len)
attn_mask = torch.tril(torch.ones(7, 7)).unsqueeze(0) # (b, seq_len, seq_len)
head_mask = torch.ones(1, n_head, 7, 7) # (b, h, seq_len, seq_len)
num_tokens = model.num_in_tokens + model.num_out_tokens
logits_mask = torch.ones(1, 7, num_tokens) # (b, seq_len, num_tokens)
out = model(
x,
y,
attn_mask=attn_mask,
head_mask=head_mask,
logits_mask=logits_mask,
use_cache=True,
causal=True,
return_attn_weights=True,
return_hidden_states=True,
)
actual = out.decoder_output.last_hidden_states
assert_expected(actual.shape, (1, 7, 576))
        # Tolerance is fairly high because the results differ slightly between Mac and
        # Linux (AWS) when rtol=1e-5.
assert_expected(actual.sum().item(), 64.0230, rtol=1, atol=1e-4)
def test_video_vqvae():
model_name = "video_vqvae"
kwargs = _model_params.get(model_name, {})
input_shape = kwargs.pop("input_shape")
model = video_vqvae(**kwargs)
model.eval()
x = torch.randn((1, 3, *input_shape))
out = model(x)
actual = out.decoded
assert_expected(actual.shape, (1, 3, 16, 64, 64))
assert_expected(actual.sum().item(), -44372.4180, rtol=1, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/test_video_gpt.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import nn
from torchmultimodal.models.flava.transformer import init_transformer_weights
from torchmultimodal.modules.encoders.bert_text_encoder import BERTTextEncoder
from torchmultimodal.modules.layers.text_embedding import BERTTextEmbeddings
from torchmultimodal.modules.layers.transformer import TransformerEncoder
@pytest.fixture(autouse=True)
def random():
set_rng_seed(0)
class TestFlavaTextEncoder:
@pytest.fixture
def emb_weights(self):
return torch.Tensor([[0, 1], [1, 0], [1, 1]])
@pytest.fixture
def text_encoder_components(self, emb_weights):
text_embedding = BERTTextEmbeddings(
hidden_size=2,
vocab_size=3,
max_position_embeddings=2,
dropout=0,
)
text_embedding.word_embeddings = nn.Embedding.from_pretrained(emb_weights)
text_embedding.position_embeddings = nn.Embedding.from_pretrained(emb_weights)
text_embedding.token_type_embeddings = nn.Embedding.from_pretrained(emb_weights)
text_embedding.eval()
encoder = TransformerEncoder(
n_layer=1,
d_model=2,
n_head=1,
dim_feedforward=1,
activation=nn.GELU,
norm_first=True,
)
weight_init_fn = partial(init_transformer_weights, initializer_range=0.02)
text_encoder = BERTTextEncoder(
embeddings=text_embedding,
encoder=encoder,
layernorm=nn.LayerNorm(2),
pooler=nn.Identity(),
weight_init_fn=weight_init_fn,
)
return text_encoder, text_embedding
@pytest.fixture
def input_ids(self):
return torch.IntTensor([[0, 1]])
@pytest.fixture
def attn_mask(self):
return torch.IntTensor([[1, 0]])
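    # The embedding module sums the word, position and token-type lookups (all pinned to the
    # same tiny table here) and layer-normalizes the result; dropout=0 keeps the output deterministic.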
def test_embedding(self, text_encoder_components, input_ids):
_, text_embedding = text_encoder_components
out = text_embedding(input_ids)
expected = torch.Tensor([[[1.0, -1.0], [-1.0, 1.0]]])
assert_expected(out, expected)
def test_text_transformer(self, text_encoder_components, input_ids):
text_encoder, _ = text_encoder_components
out = text_encoder(
input_ids,
return_attn_weights=True,
return_hidden_states=True,
)
assert_expected(
out.last_hidden_state, torch.Tensor([[[1.0, -1.0], [-1.0, 1.0]]])
)
assert_expected(
out.hidden_states,
(
torch.Tensor([[[1.0000, -1.0000], [-1.0000, 1.0000]]]),
torch.Tensor([[[1.0008, -0.9994], [-0.9997, 1.0012]]]),
),
atol=1e-4,
rtol=0.0,
)
assert_expected(out.attentions, (torch.Tensor([[[[0, 1.0], [0.0, 1.0]]]]),))
def test_text_transformer_attn_mask(
self, text_encoder_components, input_ids, attn_mask
):
text_encoder, _ = text_encoder_components
out = text_encoder(
input_ids,
attention_mask=attn_mask,
return_attn_weights=True,
return_hidden_states=True,
)
assert_expected(
out.last_hidden_state, torch.Tensor([[[1.0, -1.0], [-1.0, 1.0]]])
)
assert_expected(
out.hidden_states,
(
torch.Tensor([[[1.0, -1.0], [-1.0, 1.0]]]),
torch.Tensor([[[0.9997, -1.0012], [-1.0008, 0.9994]]]),
),
atol=1e-4,
rtol=0.0,
)
assert_expected(out.pooler_output, torch.Tensor([[[1.0, -1.0], [-1.0, 1.0]]]))
assert_expected(out.attentions, (torch.Tensor([[[[1.0, 0], [1.0, 0]]]]),))
| EXA-1-master | exa/libraries/multimodal-main/tests/models/flava/test_text_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/models/flava/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import nn
from torchmultimodal.models.flava.model import (
flava_image_encoder,
flava_model_for_classification,
flava_model_for_pretraining,
flava_text_encoder,
FLAVAModel,
)
from torchmultimodal.modules.layers.transformer import TransformerOutput
NUM_CLASSES = 2
@pytest.fixture(autouse=True)
def random():
set_rng_seed(1234)
class TestFLAVA:
@pytest.fixture
def classification_inputs(self):
text = torch.randint(0, 30500, (2, 77), dtype=torch.long)
image = torch.rand((2, 3, 224, 224))
labels = torch.randint(0, 2, (2,), dtype=torch.long)
return text, image, labels
@pytest.fixture
def pretraining_inputs(self):
text = torch.randint(0, 30500, (2, 77), dtype=torch.long)
image = torch.rand((2, 3, 224, 224))
image_for_codebook = torch.rand(2, 3, 112, 112)
image_patches_mask = torch.randint(0, 2, (2, 196), dtype=torch.long)
text_masked = text.detach().clone()
text_masked[:, 1:3] = 100
mlm_labels = text.detach().clone()
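        # Only the masked positions (1:3) keep their original token ids; every other position is
        # set to -1 so it does not contribute to the MLM loss.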
mlm_labels[:, :] = -1
mlm_labels[:, 1:3] = text[:, 1:3]
itm_labels = torch.tensor((0, 1), dtype=torch.long)
return (
text,
image,
image_for_codebook,
image_patches_mask,
text_masked,
mlm_labels,
itm_labels,
)
@torch.no_grad()
def test_forward_classification(self, classification_inputs):
text, image, labels = classification_inputs
flava = flava_model_for_classification(NUM_CLASSES, pretrained=False)
flava.eval()
# Test multimodal scenario
output = flava(image, text, "mm", labels)
assert_expected(output.loss.item(), 0.7180, rtol=0, atol=1e-4)
# Test unimodal image scenario
output = flava(image, text, "image", labels)
assert_expected(output.loss.item(), 0.7020, rtol=0, atol=1e-4)
# Test unimodal text scenario
output = flava(image, text, "text", labels)
assert_expected(output.loss.item(), 0.6663, rtol=0, atol=1e-4)
@torch.no_grad()
def test_forward_pretraining(self, pretraining_inputs):
(
text,
image,
image_for_codebook,
image_patches_mask,
text_masked,
mlm_labels,
itm_labels,
) = pretraining_inputs
flava = flava_model_for_pretraining()
flava.eval()
output = flava(
image=image,
text=text,
image_for_codebook=image_for_codebook,
image_patches_mask=image_patches_mask,
text_masked=text_masked,
required_embedding="mm",
itm_labels=itm_labels,
mlm_labels=mlm_labels,
)
assert output.mlm_output is None
assert output.mim_output is None
assert output.global_contrastive_output is not None
assert output.mmm_text_output is not None
assert output.mmm_image_output is not None
assert output.itm_output is not None
assert_expected(
sum(
value if value is not None else 0 for value in output.losses.values()
).item(),
21.5150,
rtol=0,
atol=1e-4,
)
output = flava(
image=image,
text=text,
image_for_codebook=image_for_codebook,
image_patches_mask=image_patches_mask,
text_masked=text_masked,
required_embedding="image",
itm_labels=itm_labels,
mlm_labels=mlm_labels,
)
assert output.mlm_output is None
assert output.mim_output is not None
assert output.global_contrastive_output is None
assert output.mmm_text_output is None
assert output.mmm_image_output is None
assert output.itm_output is None
assert_expected(
sum(
value if value is not None else 0 for value in output.losses.values()
).item(),
8.9674,
rtol=0,
atol=1e-4,
)
output = flava(
image=image,
text=text,
image_for_codebook=image_for_codebook,
image_patches_mask=image_patches_mask,
text_masked=text_masked,
required_embedding="text",
itm_labels=itm_labels,
mlm_labels=mlm_labels,
)
assert output.mlm_output is not None
assert output.mim_output is None
assert output.global_contrastive_output is None
assert output.mmm_text_output is None
assert output.mmm_image_output is None
assert output.itm_output is None
assert_expected(
sum(
value if value is not None else 0 for value in output.losses.values()
).item(),
10.0305,
rtol=0,
atol=1e-4,
)
class TestFLAVAModel:
@pytest.fixture
def text_encoder(self):
return flava_text_encoder(
hidden_size=2,
num_attention_heads=1,
num_hidden_layers=1,
intermediate_size=2,
)
@pytest.fixture
def image_encoder(self):
return flava_image_encoder(
hidden_size=2,
num_attention_heads=1,
num_hidden_layers=1,
intermediate_size=2,
image_size=2,
patch_size=1,
num_channels=3,
use_image_masking=True,
)
@pytest.fixture
def flava(
self,
image_encoder,
text_encoder,
):
flava_model = FLAVAModel(
image_encoder=image_encoder,
text_encoder=text_encoder,
mm_encoder=nn.Identity(),
image_to_mm_projection=nn.Identity(),
text_to_mm_projection=nn.Identity(),
text_projection=nn.Identity(),
image_projection=nn.Identity(),
)
flava_model.eval()
return flava_model
@pytest.fixture
def inputs(self):
image = torch.zeros(2, 3, 2, 2)
masked_image = torch.ones(2, 1)
text = torch.ones(2, 3, dtype=torch.int32)
masked_text = torch.ones(2, 3, dtype=torch.int32)
return image, masked_image, text, masked_text
def test_forward_image_text(self, image_encoder, text_encoder, flava, inputs):
image, _, text, _ = inputs
actual = flava(image, text)
expected_image = image_encoder(image)
expected_text = text_encoder(
text, return_attn_weights=True, return_hidden_states=True
)
assert actual.text_masked == TransformerOutput()
assert actual.multimodal_masked == TransformerOutput()
assert actual.multimodal == TransformerOutput()
assert_expected(actual.text, expected_text)
assert_expected(actual.image, expected_image)
assert_expected(actual.image_masked, expected_image)
assert_expected(
actual.projected_image_embeddings, expected_image.last_hidden_state[:, 0, :]
)
assert_expected(
actual.projected_text_embeddings, expected_text.last_hidden_state[:, 0, :]
)
def test_forward_masked_image_and_text(
self, image_encoder, text_encoder, flava, inputs
):
image, masked_image, text, masked_text = inputs
actual = flava(
text=text,
image=image,
image_patches_mask=masked_image,
text_masked=masked_text,
)
expected_image = image_encoder(image)
expected_image_masked = image_encoder(image, masked_image)
expected_text = text_encoder(
text, return_attn_weights=True, return_hidden_states=True
)
expected_text_masked = text_encoder(
masked_text, return_attn_weights=True, return_hidden_states=True
)
assert actual.multimodal == TransformerOutput()
assert_expected(actual.text_masked, expected_text_masked)
assert_expected(
actual.multimodal_masked,
torch.cat(
[
expected_image_masked.hidden_states[-1],
expected_text_masked.hidden_states[-1],
],
1,
),
)
assert_expected(actual.text, expected_text)
assert_expected(actual.image, expected_image)
assert_expected(actual.image_masked, expected_image_masked)
assert_expected(
actual.projected_image_embeddings, expected_image.last_hidden_state[:, 0, :]
)
assert_expected(
actual.projected_text_embeddings, expected_text.last_hidden_state[:, 0, :]
)
def test_forward_masked_text(self, text_encoder, flava, inputs):
_, _, text, masked_text = inputs
text = torch.ones(2, 3, dtype=torch.int32)
masked_text = torch.ones(2, 3, dtype=torch.int32)
actual = flava(text=text, text_masked=masked_text)
expected_text = text_encoder(
text, return_attn_weights=True, return_hidden_states=True
)
assert actual.multimodal_masked == TransformerOutput()
assert actual.multimodal == TransformerOutput()
assert actual.image == TransformerOutput()
assert actual.image_masked == TransformerOutput()
assert actual.projected_image_embeddings is None
assert_expected(actual.text, expected_text)
assert_expected(
actual.text_masked,
text_encoder(
masked_text, return_attn_weights=True, return_hidden_states=True
),
)
assert_expected(
actual.projected_text_embeddings, expected_text.last_hidden_state[:, 0, :]
)
def test_forward_text(self, text_encoder, flava, inputs):
_, _, text, _ = inputs
actual = flava(text=text)
expected_text = text_encoder(
text, return_attn_weights=True, return_hidden_states=True
)
assert actual.multimodal_masked == TransformerOutput()
assert actual.multimodal == TransformerOutput()
assert actual.image == TransformerOutput()
assert actual.image_masked == TransformerOutput()
assert actual.text_masked == TransformerOutput()
assert actual.projected_image_embeddings is None
assert_expected(actual.text, expected_text)
assert_expected(
actual.projected_text_embeddings, expected_text.last_hidden_state[:, 0, :]
)
def test_forward_masked_image(self, image_encoder, flava, inputs):
image, masked_image, _, _ = inputs
actual = flava(image=image, image_patches_mask=masked_image)
expected_image = image_encoder(image)
assert actual.multimodal_masked == TransformerOutput()
assert actual.multimodal == TransformerOutput()
assert actual.text == TransformerOutput()
assert actual.text_masked == TransformerOutput()
assert actual.projected_text_embeddings is None
assert_expected(actual.image, expected_image)
assert_expected(actual.image_masked, image_encoder(image, masked_image))
assert_expected(
actual.projected_image_embeddings, expected_image.last_hidden_state[:, 0, :]
)
def test_forward_image(self, image_encoder, flava, inputs):
image, _, _, _ = inputs
actual = flava(image=image)
expected_image = image_encoder(image)
assert actual.multimodal_masked == TransformerOutput()
assert actual.multimodal == TransformerOutput()
assert actual.text == TransformerOutput()
assert actual.text_masked == TransformerOutput()
assert actual.projected_text_embeddings is None
assert_expected(actual.image, expected_image)
assert_expected(actual.image_masked, image_encoder(image))
assert_expected(
actual.projected_image_embeddings, expected_image.last_hidden_state[:, 0, :]
)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/flava/test_flava.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.flava.model import (
flava_model,
flava_model_for_classification,
flava_model_for_pretraining,
)
@pytest.fixture(autouse=True)
def random():
set_rng_seed(4)
class TestFLAVACheckpoint:
@pytest.fixture
def text_input(self):
text = torch.randint(0, 30500, (2, 77), dtype=torch.long)
return text
@pytest.fixture
def image_input(self):
image = torch.rand((2, 3, 224, 224))
return image
@pytest.fixture
def inputs_classification(self, image_input, text_input):
def gather_inputs(required_embedding):
labels = torch.tensor((0, 1), dtype=torch.long)
return image_input, text_input, required_embedding, labels
return gather_inputs
@pytest.fixture
def inputs_pretraining(self, image_input, text_input):
def gather_inputs(required_embedding):
image_for_codebook = torch.rand(2, 3, 112, 112)
image_patches_mask = torch.randint(0, 2, (2, 196), dtype=torch.long)
text_masked = text_input.detach().clone()
text_masked[:, 1:3] = 100
mlm_labels = text_input.detach().clone()
mlm_labels[:, :] = -1
mlm_labels[:, 1:3] = text_input[:, 1:3]
itm_labels = torch.tensor((0, 1), dtype=torch.long)
skip_unmasked_mm_encoder = True
return (
image_input,
text_input,
image_for_codebook,
image_patches_mask,
text_masked,
required_embedding,
skip_unmasked_mm_encoder,
itm_labels,
mlm_labels,
)
return gather_inputs
@pytest.fixture
def inputs_model(self, image_input, text_input):
return image_input, text_input
@pytest.fixture
def classification_model(self):
def get_model():
flava = flava_model_for_classification(num_classes=3, pretrained=True)
flava.eval()
return flava
return get_model
@pytest.fixture
def pretraining_model(self):
def get_model():
flava = flava_model_for_pretraining(pretrained=True)
flava.eval()
return flava
return get_model
@pytest.fixture
def model(self):
def get_model():
flava = flava_model(pretrained=True)
flava.eval()
return flava
return get_model
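    # Compare two loss dicts key by key; None entries are replaced by a zero tensor on both
    # sides so assert_expected can run on every key.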
def _assert_tensor_dicts_equal(self, dict_actual, dict_expected):
for key in dict_expected:
actual = torch.zeros(1) if dict_actual[key] is None else dict_actual[key]
expected = (
torch.zeros(1)
if dict_expected[key] is None
else torch.tensor(dict_expected[key])
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_flava_model_for_classification(
self, inputs_classification, classification_model
):
mm_input = inputs_classification("mm")
image_input = inputs_classification("image")
text_input = inputs_classification("text")
flava = classification_model()
output = flava(*mm_input)
actual = output.loss
expected = torch.tensor(1.0827)
assert_expected(actual, expected, rtol=0, atol=1e-4)
output = flava(*image_input)
actual = output.loss
expected = torch.tensor(1.0849)
assert_expected(actual, expected, rtol=0, atol=1e-4)
output = flava(*text_input)
actual = output.loss
expected = torch.tensor(1.0822)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_flava_model_for_pretraining(self, inputs_pretraining, pretraining_model):
mm_input = inputs_pretraining("mm")
image_input = inputs_pretraining("image")
text_input = inputs_pretraining("text")
flava = pretraining_model()
output = flava(*mm_input)
actual = output.losses
expected = dict(
mmm_text_loss=10.9567,
mmm_image_loss=11.2143,
mim_loss=None,
mlm_loss=None,
itm_loss=1.1485,
global_contrastive_loss=0.7104,
)
self._assert_tensor_dicts_equal(actual, expected)
output = flava(*image_input)
actual = output.losses
expected = dict(
mmm_text_loss=None,
mmm_image_loss=None,
mim_loss=10.5749,
mlm_loss=None,
itm_loss=None,
global_contrastive_loss=None,
)
self._assert_tensor_dicts_equal(actual, expected)
output = flava(*text_input)
actual = output.losses
expected = dict(
mmm_text_loss=None,
mmm_image_loss=None,
mim_loss=None,
mlm_loss=16.1049,
itm_loss=None,
global_contrastive_loss=None,
)
self._assert_tensor_dicts_equal(actual, expected)
def test_flava_model(self, inputs_model, model):
flava = model()
output = flava(*inputs_model, skip_unmasked_mm_encoder=False)
actual = torch.sum(output.image.last_hidden_state)
expected = torch.tensor(-1321.3137)
assert_expected(actual, expected, rtol=0, atol=1e-3)
actual = torch.sum(output.text.last_hidden_state)
expected = torch.tensor(-220.2462)
assert_expected(actual, expected, rtol=0, atol=1e-3)
actual = torch.sum(output.multimodal.last_hidden_state)
expected = torch.tensor(-4358.3115)
assert_expected(actual, expected, rtol=0, atol=1e-3)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/flava/test_checkpoint.py |