python_code (stringlengths 0-4.04M) | repo_name (stringlengths 7-58) | file_path (stringlengths 5-147) |
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
    parser = argparse.ArgumentParser(description='symmetric alignment builder')
# fmt: off
parser.add_argument('--fast_align_dir',
help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir',
help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic',
help='heuristic to use for symmetrization',
default='grow-diag-final-and')
parser.add_argument('--source_file',
help='path to a file with sentences '
'in the source language')
parser.add_argument('--target_file',
help='path to a file with sentences '
'in the target language')
parser.add_argument('--output_dir',
help='output directory')
# fmt: on
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
sym_fast_align_bin = os.path.join(
args.mosesdecoder_dir, 'scripts', 'ems',
'support', 'symmetrize-fast-align.perl')
# create joined file
joined_file = os.path.join(args.output_dir, 'text.joined')
with open(args.source_file, 'r', encoding='utf-8') as src, open(args.target_file, 'r', encoding='utf-8') as tgt:
with open(joined_file, 'w', encoding='utf-8') as joined:
for s, t in zip_longest(src, tgt):
print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)
# run forward alignment
fwd_align_file = os.path.join(args.output_dir, 'align.forward')
fwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(
FASTALIGN=fast_align_bin,
JOINED=joined_file,
FWD=fwd_align_file)
assert os.system(fwd_fast_align_cmd) == 0
# run backward alignment
bwd_align_file = os.path.join(args.output_dir, 'align.backward')
bwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(
FASTALIGN=fast_align_bin,
JOINED=joined_file,
BWD=bwd_align_file)
assert os.system(bwd_fast_align_cmd) == 0
# run symmetrization
sym_out_file = os.path.join(args.output_dir, 'aligned')
sym_cmd = '{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(
SYMFASTALIGN=sym_fast_align_bin,
FWD=fwd_align_file,
BWD=bwd_align_file,
SRC=args.source_file,
TGT=args.target_file,
OUT=sym_out_file,
HEURISTIC=args.sym_heuristic,
SYMAL=symal_bin
)
assert os.system(sym_cmd) == 0
if __name__ == '__main__':
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/build_sym_alignment.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True,
help="sentencepiece model to use for decoding")
parser.add_argument("--input", required=True, help="input file to decode")
parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.input_format == "piece":
def decode(l):
return "".join(sp.DecodePieces(l))
elif args.input_format == "id":
def decode(l):
return "".join(sp.DecodeIds(l))
else:
raise NotImplementedError
def tok2int(tok):
# remap reference-side <unk> (represented as <<unk>>) to 0
return int(tok) if tok != "<<unk>>" else 0
with open(args.input, "r", encoding="utf-8") as h:
for line in h:
print(decode(list(map(tok2int, line.rstrip().split()))))
if __name__ == "__main__":
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/spm_decode.py |
| data2vec_vision-main | infoxlm/fairseq/scripts/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import shutil
import sys
pt_regexp = re.compile(r'checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt')
pt_regexp_epoch_based = re.compile(r'checkpoint(\d+)\.pt')
pt_regexp_update_based = re.compile(r'checkpoint_\d+_(\d+)\.pt')
def parse_checkpoints(files):
entries = []
for f in files:
m = pt_regexp_epoch_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
else:
m = pt_regexp_update_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
return entries
def last_n_checkpoints(files, n):
entries = parse_checkpoints(files)
return [x[1] for x in sorted(entries, reverse=True)[:n]]
def every_n_checkpoints(files, n):
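    # Keep every n-th checkpoint, counting backwards from the newest, and
    # return the kept checkpoints sorted from oldest to newest.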
entries = parse_checkpoints(files)
return [x[1] for x in sorted(sorted(entries)[::-n])]
def main():
parser = argparse.ArgumentParser(
description=(
'Recursively delete checkpoint files from `root_dir`, '
'but preserve checkpoint_best.pt and checkpoint_last.pt'
)
)
parser.add_argument('root_dirs', nargs='*')
parser.add_argument('--save-last', type=int, default=0, help='number of last checkpoints to save')
parser.add_argument('--save-every', type=int, default=0, help='interval of checkpoints to save')
parser.add_argument('--preserve-test', action='store_true',
help='preserve checkpoints in dirs that start with test_ prefix (default: delete them)')
parser.add_argument('--delete-best', action='store_true', help='delete checkpoint_best.pt')
parser.add_argument('--delete-last', action='store_true', help='delete checkpoint_last.pt')
parser.add_argument('--no-dereference', action='store_true', help='don\'t dereference symlinks')
args = parser.parse_args()
files_to_desymlink = []
files_to_preserve = []
files_to_delete = []
for root_dir in args.root_dirs:
for root, _subdirs, files in os.walk(root_dir):
if args.save_last > 0:
to_save = last_n_checkpoints(files, args.save_last)
else:
to_save = []
if args.save_every > 0:
to_save += every_n_checkpoints(files, args.save_every)
for file in files:
if not pt_regexp.fullmatch(file):
continue
full_path = os.path.join(root, file)
if (
(
not os.path.basename(root).startswith('test_')
or args.preserve_test
)
and (
(file == 'checkpoint_last.pt' and not args.delete_last)
or (file == 'checkpoint_best.pt' and not args.delete_best)
or file in to_save
)
):
if os.path.islink(full_path) and not args.no_dereference:
files_to_desymlink.append(full_path)
else:
files_to_preserve.append(full_path)
else:
files_to_delete.append(full_path)
if len(files_to_desymlink) == 0 and len(files_to_delete) == 0:
print('Nothing to do.')
sys.exit(0)
files_to_desymlink = sorted(files_to_desymlink)
files_to_preserve = sorted(files_to_preserve)
files_to_delete = sorted(files_to_delete)
print('Operations to perform (in order):')
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
print(' - preserve (and dereference symlink): ' + file)
if len(files_to_preserve) > 0:
for file in files_to_preserve:
print(' - preserve: ' + file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print(' - delete: ' + file)
while True:
resp = input('Continue? (Y/N): ')
if resp.strip().lower() == 'y':
break
elif resp.strip().lower() == 'n':
sys.exit(0)
print('Executing...')
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
realpath = os.path.realpath(file)
print('rm ' + file)
os.remove(file)
print('cp {} {}'.format(realpath, file))
shutil.copyfile(realpath, file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print('rm ' + file)
os.remove(file)
if __name__ == '__main__':
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/rm_pt.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('--gzip', action='store_true')
args = parser.parse_args()
def gopen():
if args.gzip:
return gzip.open(args.input, 'r')
else:
return open(args.input, 'r', encoding='utf-8')
num_lines = []
num_toks = []
with gopen() as h:
num_docs = 1
num_lines_in_doc = 0
num_toks_in_doc = 0
for i, line in enumerate(h):
if len(line.strip()) == 0: # empty line indicates new document
num_docs += 1
num_lines.append(num_lines_in_doc)
num_toks.append(num_toks_in_doc)
num_lines_in_doc = 0
num_toks_in_doc = 0
else:
num_lines_in_doc += 1
num_toks_in_doc += len(line.rstrip().split())
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
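    # NOTE: the final document's line/token counts are only recorded when the
    # input ends with a trailing blank line.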
print("found {} docs".format(num_docs))
print("average num lines per doc: {}".format(np.mean(num_lines)))
print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == '__main__':
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/count_docs.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import contextlib
import sys
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True,
help="sentencepiece model to use for encoding")
parser.add_argument("--inputs", nargs="+", default=['-'],
help="input files to filter/encode")
parser.add_argument("--outputs", nargs="+", default=['-'],
help="path to save encoded outputs")
parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
parser.add_argument("--min-len", type=int, metavar="N",
help="filter sentence pairs with fewer than N tokens")
parser.add_argument("--max-len", type=int, metavar="N",
help="filter sentence pairs with more than N tokens")
args = parser.parse_args()
assert len(args.inputs) == len(args.outputs), \
"number of input and output paths should match"
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.output_format == "piece":
def encode(l):
return sp.EncodeAsPieces(l)
elif args.output_format == "id":
def encode(l):
return list(map(str, sp.EncodeAsIds(l)))
else:
raise NotImplementedError
if args.min_len is not None or args.max_len is not None:
def valid(line):
return (
(args.min_len is None or len(line) >= args.min_len)
and (args.max_len is None or len(line) <= args.max_len)
)
else:
def valid(lines):
return True
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8")) \
if input != "-" else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8")) \
if output != "-" else sys.stdout
for output in args.outputs
]
stats = {
"num_empty": 0,
"num_filtered": 0,
}
def encode_line(line):
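        # Encode a single line; return None for empty lines or lines that fail
        # the min/max length filter so the whole parallel example gets skipped.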
line = line.strip()
if len(line) > 0:
line = encode(line)
if valid(line):
return line
else:
stats["num_filtered"] += 1
else:
stats["num_empty"] += 1
return None
for i, lines in enumerate(zip(*inputs), start=1):
enc_lines = list(map(encode_line, lines))
if not any(enc_line is None for enc_line in enc_lines):
for enc_line, output_h in zip(enc_lines, outputs):
print(" ".join(enc_line), file=output_h)
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)
if __name__ == "__main__":
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/spm_encode.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into shards while respecting document boundaries. Documents
should be separated by a single empty line.
"""
import argparse
import contextlib
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('--num-shards', type=int)
args = parser.parse_args()
assert args.num_shards is not None and args.num_shards > 1
with open(args.input, 'r', encoding='utf-8') as h:
with contextlib.ExitStack() as stack:
outputs = [
stack.enter_context(open(args.input + ".shard" + str(i), "w", encoding="utf-8"))
for i in range(args.num_shards)
]
doc = []
first_doc = [True]*args.num_shards
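            # Flush the buffered document to shard i; within a shard, documents
            # are separated by a single blank line (shards assigned round-robin).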
def output_doc(i):
if not first_doc[i]:
outputs[i].write("\n")
first_doc[i] = False
for line in doc:
outputs[i].write(line)
doc.clear()
num_docs = 0
for line in h:
if line.strip() == "": # empty line indicates new document
output_doc(num_docs % args.num_shards)
num_docs += 1
else:
doc.append(line)
output_doc(num_docs % args.num_shards)
if __name__ == '__main__':
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/shard_docs.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import sentencepiece as spm
if __name__ == "__main__":
spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
| data2vec_vision-main | infoxlm/fairseq/scripts/spm_train.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import torch
import os
import re
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for f in inputs:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state['model']
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
'For checkpoint {}, expected list of params: {}, '
'but found: {}'.format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
averaged_params[k].div_(num_models)
new_state['model'] = averaged_params
return new_state
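# Example usage (hypothetical paths): average two epoch checkpoints and save
# the result.
#   new_state = average_checkpoints(['ckpts/checkpoint1.pt', 'ckpts/checkpoint2.pt'])
#   torch.save(new_state, 'ckpts/checkpoint_avg.pt')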
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
else:
pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
files = os.listdir(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
        raise Exception('Found {} checkpoint files but need at least {}'.format(len(entries), n))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description='Tool to average the params of input checkpoints to '
'produce a new checkpoint',
)
# fmt: off
parser.add_argument('--inputs', required=True, nargs='+',
help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this path.')
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument('--num-epoch-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, '
'and average last this many of them.')
num_group.add_argument('--num-update-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
'and average last this many of them.')
parser.add_argument('--checkpoint-upper-bound', type=int,
help='when using --num-epoch-checkpoints, this will set an upper bound on which checkpoint to use, '
'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.')
# fmt: on
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
assert args.checkpoint_upper_bound is None or args.num_epoch_checkpoints is not None, \
'--checkpoint-upper-bound requires --num-epoch-checkpoints'
assert args.num_epoch_checkpoints is None or args.num_update_checkpoints is None, \
'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'
if num is not None:
args.inputs = last_n_checkpoints(
args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound,
)
print('averaging checkpoints: ', args.inputs)
new_state = average_checkpoints(args.inputs)
torch.save(new_state, args.output)
print('Finished writing averaged checkpoint to {}.'.format(args.output))
if __name__ == '__main__':
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/average_checkpoints.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build a wav2vec manifest by indexing audio files under a
root directory and splitting them into train/valid .tsv files.
"""
import argparse
import glob
import os
import soundfile
import random
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('root', metavar='DIR', help='root directory containing flac files to index')
parser.add_argument('--valid-percent', default=0.01, type=float, metavar='D',
help='percentage of data to use as validation set (between 0 and 1)')
parser.add_argument('--dest', default='.', type=str, metavar='DIR', help='output directory')
parser.add_argument('--ext', default='flac', type=str, metavar='EXT', help='extension to look for')
parser.add_argument('--seed', default=42, type=int, metavar='N', help='random seed')
parser.add_argument('--path-must-contain', default=None, type=str, metavar='FRAG',
help='if set, path must contain this substring for a file to be included in the manifest')
return parser
def main(args):
assert args.valid_percent >= 0 and args.valid_percent <= 1.
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, '**/*.' + args.ext)
rand = random.Random(args.seed)
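    # Each manifest .tsv starts with the root directory on its first line,
    # followed by one "<relative_path>\t<num_frames>" line per audio file.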
with open(os.path.join(args.dest, 'train.tsv'), 'w') as train_f, open(
os.path.join(args.dest, 'valid.tsv'), 'w') as valid_f:
print(dir_path, file=train_f)
print(dir_path, file=valid_f)
for fname in glob.iglob(search_path, recursive=True):
file_path = os.path.realpath(fname)
if args.path_must_contain and args.path_must_contain not in file_path:
continue
frames = soundfile.info(fname).frames
dest = train_f if rand.random() > args.valid_percent else valid_f
print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=dest)
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args)
| data2vec_vision-main | infoxlm/fairseq/scripts/wav2vec_manifest.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from fairseq.data import data_utils, Dictionary, indexed_dataset
def get_parser():
parser = argparse.ArgumentParser(
description='writes text from binarized file to stdout')
# fmt: off
parser.add_argument('--dataset-impl', help='dataset implementation',
choices=indexed_dataset.get_available_dataset_impl())
parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None)
parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
dictionary = Dictionary.load(args.dict) if args.dict is not None else None
dataset = data_utils.load_indexed_dataset(
args.input,
dictionary,
dataset_impl=args.dataset_impl,
default='lazy',
)
for tensor_line in dataset:
if dictionary is None:
line = ' '.join([str(int(x)) for x in tensor_line])
else:
line = dictionary.string(tensor_line)
print(line)
if __name__ == '__main__':
main()
| data2vec_vision-main | infoxlm/fairseq/scripts/read_binarized.py |
import setuptools
setuptools.setup(
name="infoxlm",
version="0.0.1",
author="Zewen",
author_email="[email protected]",
description="infoxlm",
url="https://github.com/CZWin32768/XLM-Align",
packages=setuptools.find_packages(),
install_requires=[],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
)
) | data2vec_vision-main | infoxlm/src-infoxlm/setup.py |
import infoxlm
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main() | data2vec_vision-main | infoxlm/src-infoxlm/train.py |
import infoxlm.tasks
import infoxlm.models
import infoxlm.criterions | data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/__init__.py |
import torch
from fairseq import utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
if torch.cuda.device_count() > 1:
return varsize_tensor_all_gather(tensor)
else:
output = tensor
return output
@torch.no_grad()
def tensor_all_gather(tensor):
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
@torch.no_grad()
def varsize_tensor_all_gather(tensor):
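  # All-gather tensors whose first dimension differs across workers: gather
  # the per-rank sizes, pad every tensor to the largest size, all-gather the
  # padded tensors, then slice away the padding.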
  # cuda_device = f'cuda:{torch.distributed.get_rank()}'
cuda_device = 'cuda'
if tensor is None:
size_tens = torch.tensor([0], dtype=torch.int64, device=cuda_device)
else:
size_tens = torch.tensor([tensor.shape[0]], dtype=torch.int64, device=cuda_device)
# print("size_tens", flush=True)
# print(size_tens, flush=True)
size_tens = tensor_all_gather(size_tens).cpu()
max_size = size_tens.max()
padded = torch.empty(max_size, *tensor.shape[1:],
dtype=tensor.dtype,
device=cuda_device)
if tensor is not None:
padded[:tensor.shape[0]] = tensor
# print("padded:", flush=True)
# print(padded, flush=True)
ag = tensor_all_gather(padded)
# print("ag:", flush=True)
# print(ag, flush=True)
slices = []
for i, sz in enumerate(size_tens):
start_idx = i * max_size
end_idx = start_idx + sz.item()
if end_idx > start_idx:
slices.append(ag[start_idx:end_idx])
ret = torch.cat(slices, dim=0)
return ret.to(tensor)
def _get_logging_loss(loss, reduce=True):
if loss is None: return 0
return utils.item(loss.data) if reduce else loss.data
def construct_idx_tensor_from_list(idx_list2d, lens, pad_idx, device=None):
max_len = max(lens)
padded_list = [list_i + [pad_idx] * (max_len - lens[i]) for i, list_i in enumerate(idx_list2d)]
tensor = torch.LongTensor(padded_list)
if device is not None:
tensor = tensor.to(device=device)
return tensor
def move_to_device(sample, device):
def _move_to_device(tensor):
return tensor.to(device=device)
return utils.apply_to_sample(_move_to_device, sample)
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/utils.py |
import os
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.tasks.mlm import Mlm
@register_task("tlm")
class Tlm(Mlm):
@staticmethod
def add_args(parser):
Mlm.add_args(parser)
parser.add_argument('--tlm_data', type=str, default="")
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
# tlm step
loss, sample_size, logging_output = criterion(model, sample["tlm"])
if ignore_grad: loss *= 0
tlm_loss = loss
optimizer.backward(tlm_loss)
agg_loss += tlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# mlm_step
loss, sample_size, logging_output = criterion(model, sample["mlm"])
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
for key, value in logging_output.items():
agg_logging_output[key] += value
return agg_loss, agg_sample_size, agg_logging_output
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
tlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset = DictDataset({
"tlm": tlm_dataset,
"mlm": mlm_dataset,
})
self.datasets[split] = dataset
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/tasks/tlm.py |
import os
from functools import lru_cache
import numpy as np
import torch
from fairseq import utils
from fairseq.data.data_utils import process_bpe_symbol
from fairseq.data.dictionary import Dictionary
from fairseq.tasks import FairseqTask, register_task
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.data.xlm_align import get_xlm_align_dataset_with_mask
def extract_wa_from_pi_xi(pi, xi):
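  # Intersect the row-wise argmax of pi with the column-wise argmax of xi and
  # return the aligned (source, target) index pairs.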
m, n = pi.size()
forward = torch.eye(n)[pi.argmax(dim=1)]
backward = torch.eye(m)[xi.argmax(dim=0)]
inter = forward * backward.transpose(0, 1)
ret = []
for i in range(m):
for j in range(n):
if inter[i, j].item() > 0:
ret.append((i, j))
return ret
def _sinkhorn_iter(S, num_iter=2):
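  # Truncated Sinkhorn normalization: alternately normalize the columns
  # (yielding xi) and the rows (yielding pi) of the similarity matrix, moving
  # it towards a doubly-stochastic matrix.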
assert S.dim() == 2
S[S <= 0] = 1e-6
pi = S
xi = pi
for i in range(num_iter):
pi_sum_over_i = pi.sum(dim=0, keepdim=True)
xi = pi / pi_sum_over_i
xi_sum_over_j = xi.sum(dim=1, keepdim=True)
pi = xi / xi_sum_over_j
return pi, xi
@register_task('xlm_align')
class XlmAlignTask(FairseqTask):
@staticmethod
def add_args(parser):
# MLM args
mlm_utils.add_mlm_args(parser)
parser.add_argument('data', help='colon separated path to data directories list, '
'will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments per sample')
# apply prepend bos + tokenblock
parser.add_argument('--apply_ptb', default=False, action='store_true')
# TLM args
parser.add_argument('--tlm_data', type=str, default="")
# Word Alignment Self-Labeling
parser.add_argument('--wa_layer', type=int, default=8, help="the layer to obtain word alignment")
parser.add_argument('--wa_max_count', type=int, default=2, help="max_count for itermax")
parser.add_argument('--align_enable_step', default=-1, type=int)
parser.add_argument('--feed_inner_states', default=False, action='store_true')
parser.add_argument('--sinkhorn_iter', type=int, default=2, help="num of sinkhorn iterations")
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| Dictionary: {} types'.format(len(dictionary)), flush=True)
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.mask_idx = self.dictionary.add_symbol('<mask>')
self.seed = args.seed
self.mww = self._get_whole_word_mask()
self.sa_model = None
self._enable_align = False
def prepare_train(self, model, criterion):
print("| Prepare train ...", flush=True)
self.model = model
model.train()
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
print("| Get whole work mask ...")
return mlm_utils.get_whole_word_mask(self.args, self.dictionary)
return None
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
sa_dataset = get_xlm_align_dataset_with_mask(args, dataset_path, self.dictionary, self.mask_idx, combine=False)
dataset = DictDataset({
"mlm": mlm_dataset,
"sa": sa_dataset
})
# NOTE Set dataset epoch as sid for different random state
# of each shard, because when local indices are the same, the
# random states are the same.
dataset.set_epoch(sid)
self.datasets[split] = dataset
def iter_max(self, sim_matrix):
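    # IterMax alignment extraction: start from the intersection of the
    # forward (source->target) and backward (target->source) argmax links,
    # then for up to wa_max_count rounds add argmax links for positions that
    # are not yet covered, masking out already-aligned positions.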
sim_matrix = sim_matrix.cpu().detach().numpy()
max_count = self.args.wa_max_count
alpha_ratio = 0.9
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
inter = forward * backward.transpose()
# if min(m, n) <= 2:
# return inter
if min(m, n) > 2:
new_inter = np.zeros((m, n))
count = 1
while count < max_count:
mask_x = 1.0 - np.tile(inter.sum(1)[:, np.newaxis], (1, n)).clip(0.0, 1.0)
mask_y = 1.0 - np.tile(inter.sum(0)[np.newaxis, :], (m, 1)).clip(0.0, 1.0)
mask = ((alpha_ratio * mask_x) + (alpha_ratio * mask_y)).clip(0.0, 1.0)
mask_zeros = 1.0 - ((1.0 - mask_x) * (1.0 - mask_y))
if mask_x.sum() < 1.0 or mask_y.sum() < 1.0:
mask *= 0.0
mask_zeros *= 0.0
new_sim = sim_matrix * mask
fwd = np.eye(n)[new_sim.argmax(axis=1)] * mask_zeros
bac = np.eye(m)[new_sim.argmax(axis=0)].transpose() * mask_zeros
new_inter = fwd * bac
if np.array_equal(inter + new_inter, inter):
break
inter = inter + new_inter
count += 1
ret = []
for i in range(m):
for j in range(n):
if inter[i, j] > 0:
ret.append((i, j))
return inter, ret
def get_gold_or_silver_wa(self, sample, batch_sim, src_fr, src_to, trg_fr, trg_to):
# bsz, max_len, _ = batch_sim.size()
# print(src_fr)
# print(src_to)
# print(trg_fr)
# print(trg_to)
# print("batch_sim.shape=%s" % str(batch_sim.shape))
if "gold_wa" in sample:
loaded_gold_wa = sample["gold_wa"]
else:
loaded_gold_wa = [None] * len(batch_sim)
gold_wa = []
for i, sim in enumerate(batch_sim):
loaded_gold_wa_i = loaded_gold_wa[i]
if loaded_gold_wa_i is not None:
gold_wa.append(loaded_gold_wa_i)
# print(loaded_gold_wa_i)
continue
# NOTE if gold_wa is None then generate silver wa
# print("sim.shape=%s" % str(sim.shape))
sim_wo_offset = sim[src_fr[i]: src_to[i], trg_fr[i]: trg_to[i]]
# print("sim_wo_offset=%s" % str(sim_wo_offset.shape))
if src_to[i] - src_fr[i] <= 0 or trg_to[i] - trg_fr[i] <= 0:
print("[W] src or trg len=0")
gold_wa.append([])
continue
_, gold_wa_i_wo_offset = self.iter_max(sim_wo_offset)
gold_wa_i = []
for src_idx, trg_idx in gold_wa_i_wo_offset:
gold_wa_i.append((src_idx + src_fr[i], trg_idx + trg_fr[i]))
gold_wa.append(gold_wa_i)
return gold_wa
def get_aligned_tokens(self, sample, model, use_csls=False, return_inner_states=False):
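    # Word-alignment self-labeling: run the model, take the hidden states of
    # layer wa_layer, compute token-token similarities inside each joint
    # src/trg sequence, and extract alignments with IterMax (gold alignments
    # from the sample are reused when available).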
_, inner_states = model(**sample['net_input'],
features_only=True, return_all_hiddens=True)
# rep: batch, hidden, length
rep = inner_states["inner_states"][self.args.wa_layer]
src_fr, src_to, trg_fr, trg_to = sample["offsets"]
# rep: batch, length, hidden
rep = rep.transpose(0, 1)
if use_csls: raise NotImplementedError
batch_sim = torch.bmm(rep, rep.transpose(1,2))
wa = self.get_gold_or_silver_wa(sample, batch_sim, src_fr, src_to, trg_fr, trg_to)
if return_inner_states: return wa, inner_states
else: return wa
def _extract_wa_from_pi_xi(self, pi, xi):
# return extract_wa_from_pi_xi(pi, xi)
_, wa = self.iter_max(pi)
return wa
def _set_enable_align(self, num_updates):
if num_updates < self.args.align_enable_step: self._enable_align = False
else: self._enable_align = True
def update_step(self, num_updates):
self._set_enable_align(num_updates)
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
if self.sa_model is None:
self.sa_model = model
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
if self._enable_align:
self.sa_model.eval()
if self.args.feed_inner_states:
with torch.no_grad():
aligned_tokens, inner_states = self.get_aligned_tokens(sample["sa"], self.sa_model, return_inner_states=True)
model.train()
loss, sample_size, logging_output = criterion(
model, sample["sa"], reduce=True, aligned_tokens=aligned_tokens, inner_states=inner_states)
else:
with torch.no_grad():
aligned_tokens = self.get_aligned_tokens(sample["sa"], self.sa_model)
model.train()
loss, sample_size, logging_output = criterion(
model, sample["sa"], reduce=True, aligned_tokens=aligned_tokens)
if ignore_grad: loss *= 0
optimizer.backward(loss)
else:
model.train()
loss, sample_size, logging_output = criterion(model, sample["sa"], tlm=True)
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
loss, sample_size, logging_output = criterion(model, sample["mlm"], mlm=True)
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
return agg_loss, agg_sample_size, agg_logging_output
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/tasks/xlm_align.py |
import os
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from infoxlm.data import mlm_utils
@register_task("mlm")
class Mlm(FairseqTask):
@staticmethod
def add_args(parser):
mlm_utils.add_mlm_args(parser)
parser.add_argument('data', help='colon separated path to data directories list, '
'will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments per sample')
# apply prepend bos + tokenblock
parser.add_argument('--apply_ptb', default=False, action='store_true')
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| Dictionary: {} types'.format(len(dictionary)), flush=True)
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.mask_idx = self.dictionary.add_symbol('<mask>')
self.seed = args.seed
self.mww = self._get_whole_word_mask()
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
print("| Get whole work mask ...")
return mlm_utils.get_whole_word_mask(self.args, self.dictionary)
return None
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
self.datasets[split] = dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/tasks/mlm.py |
import argparse
import importlib
import os
from fairseq.tasks import TASK_REGISTRY
# automatically import any Python files in the tasks/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
task_name = file[:file.find('.py')]
importlib.import_module('infoxlm.tasks.' + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group('Task name')
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group('Additional command-line arguments')
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + '_parser'] = parser | data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/tasks/__init__.py |
import os
import torch
from functools import lru_cache
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from fairseq.data import FairseqDataset
from fairseq import utils
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.data.xlco_dataset import get_xlco_dataset
from infoxlm.tasks.mlm import Mlm
def _prepare_sample(sample, cuda=True, fp16=True):
if sample is None or len(sample) == 0:
return None
if cuda:
sample = utils.move_to_cuda(sample)
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
if fp16:
sample = utils.apply_to_sample(apply_half, sample)
return sample
@register_task("infoxlm")
class InfoXLM(Mlm):
@staticmethod
def add_args(parser):
Mlm.add_args(parser)
parser.add_argument('--tlm_data', type=str, default="")
parser.add_argument('--xlco_data', type=str, default="")
# e.g. constant,0.999
# e.g. linear,0,700000,0.999,1.0
parser.add_argument('--xlco_momentum', default="constant,0.999", type=str)
parser.add_argument('--xlco_enable_step', default=-1, type=int)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
    # NOTE workaround for model building
# Actually, self.langs represents the keys of proj heads
self.model_langs = ["share_lang"]
self.xlco_lambda = self.args.xlco_lambda
# parse xlco_momentum
cxlm_args = self.args.xlco_momentum.split(",")
# self.constant_xlco_momentum = True
self.cxlm_scheduler = "constant"
self.constant_momentum_refresh_interval = -1
if cxlm_args[0] == "constant":
self._xlco_momentum = float(cxlm_args[1])
print("Momentum args: consant momentum: %.4f" % (self._xlco_momentum), flush=True)
elif cxlm_args[0] == "linear":
# self.constant_xlco_momentum = False
self.cxlm_scheduler = "linear"
self._mom_schedule_begin, self._mom_schedule_end, self._xlco_momentum_min, self._xlco_momentum_max = map(float, cxlm_args[1:])
print("Momentum args: linear self._mom_schedule_begin: %.4f, self._mom_schedule_end: %.4f, self._xlco_momentum_min: %.4f, self._xlco_momentum_max: %.4f " % (self._mom_schedule_begin, self._mom_schedule_end, self._xlco_momentum_min, self._xlco_momentum_max), flush=True)
assert self._mom_schedule_end >= self._mom_schedule_begin
elif cxlm_args[0] == "constant_with_refresh":
self._xlco_momentum = float(cxlm_args[1])
self.constant_momentum_refresh_interval = int(cxlm_args[2])
print("Momentum args: consant momentum: %.4f, refresh interval: %d" % (self._xlco_momentum, self.constant_momentum_refresh_interval), flush=True)
elif cxlm_args[0] == "exponential":
# example exponential,0.51,0.0,0.9995
self.cxlm_scheduler = "exponential"
self._xlco_momentum_alpha, self._xlco_momentum_min, self._xlco_momentum_max = map(float, cxlm_args[1:])
print("Momentum args: exponential self._xlco_momentum_alpha: %.4f, self._xlco_momentum_min: %.4f, self._xlco_momentum_max: %.4f " % (self._xlco_momentum_alpha, self._xlco_momentum_min, self._xlco_momentum_max), flush=True)
else:
raise NotImplementedError
self._cur_momentum = self.get_xlco_momentum(0)
print("Test get_xlco_momentum ...")
for i in range(10):
num_updates = i * 100000
print("num_updates: %d get_xlco_momentum:%f" % (i, self.get_xlco_momentum(num_updates)))
def get_xlco_momentum(self, num_updates):
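    # Momentum schedule for the slow encoder: constant (optionally refreshed
    # to 0 every N updates), linear interpolation between two step counts, or
    # exponential 1 - t^(-alpha), clipped to [min, max].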
if self.cxlm_scheduler == "constant":
if self.constant_momentum_refresh_interval == -1:
return self._xlco_momentum
else:
if num_updates % self.constant_momentum_refresh_interval == 0:
return 0.0
else:
return self._xlco_momentum
elif self.cxlm_scheduler == "linear":
if num_updates <= self._mom_schedule_begin:
return self._xlco_momentum_min
elif num_updates >= self._mom_schedule_end:
return self._xlco_momentum_max
else:
return (num_updates - self._mom_schedule_begin) * (self._xlco_momentum_max - self._xlco_momentum_min) / (self._mom_schedule_end - self._mom_schedule_begin) + self._xlco_momentum_min
elif self.cxlm_scheduler == "exponential":
if num_updates <= 0: return self._xlco_momentum_min
mom = 1.0 - num_updates ** (-self._xlco_momentum_alpha)
mom = max(mom, self._xlco_momentum_min)
mom = min(mom, self._xlco_momentum_max)
return mom
else:
raise ValueError
def prepare_train(self, model, criterion):
print("| Prepare train ...", flush=True)
# DEBUG
# print("Test get_xlco_momentum ...")
# for i in range(10):
# num_updates = i * 100000
# print("num_updates: %d get_xlco_momentum:%f" % (i, self.get_xlco_momentum(num_updates)))
self.model = model
model.train()
if not model.is_queue_ready():
self.fill_queue(criterion)
assert model.is_queue_ready()
def fill_queue(self, criterion):
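    # Warm up the XLCO memory queue: run gradient-free forward passes over the
    # contrastive data until the queue holds xlco_queue_size keys.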
print("| Filling language queue ... ")
fill_opt_cnt = 0
dummy_batch = None
epoch_itr = self.get_batch_iterator(
dataset=self.load_xlco_dataset(self.args.train_subset),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=utils.resolve_max_positions(
self.max_positions(), self.model.max_positions()
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.args.distributed_world_size,
shard_id=self.args.distributed_rank,
num_workers=0,
epoch=0,)
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=self.args.fix_batches_to_gpus,
shuffle=False,)
# DEBUG
# NOTE add a ref to prevent deletion
# self._fill_queue_itr = itr
ddp_size = 1 if not hasattr(self.args, "distributed_world_size") else self.args.distributed_world_size
tot_fill_opt = criterion.xlco_queue_size // self.args.max_sentences // ddp_size + 100
# print("| %d filling opt in total." % tot_fill_opt, flush=True)
for _ in range(tot_fill_opt):
sample = next(itr)
if dummy_batch is None: dummy_batch = sample
sample = _prepare_sample(sample)
if sample is None:
sample = _prepare_sample(dummy_batch)
print("| [W] a dummy batch used", flush=True)
with torch.no_grad():
criterion(self.model, sample)
if fill_opt_cnt % 100 == 0:
print("| Filling queue, fill_opt_cnt: %d" % fill_opt_cnt, flush=True)
fill_opt_cnt += 1
print("| %d filling opt in total." % fill_opt_cnt, flush=True)
assert self.model.is_queue_ready()
print("| queue.mean(): %f, queue.var(): %f" % (self.model.queue.mean().item(), self.model.queue.var().item()))
del itr
del epoch_itr
def update_step(self, num_updates):
if num_updates < self.args.xlco_enable_step:
self.xlco_lambda = 0.0
self._cur_momentum = 0.0
if num_updates + 5 >= self.args.xlco_enable_step:
self.model.update_slow_weight(0.0)
else:
self.xlco_lambda = self.args.xlco_lambda
self._cur_momentum = self.get_xlco_momentum(num_updates)
self.model.update_slow_weight(self._cur_momentum)
# pass
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
# cxlm_step
loss, sample_size, logging_output = criterion(model, sample["xlco"])
if loss is None:
raise ValueError
if ignore_grad: loss *= 0
cxlm_loss = loss
optimizer.backward(cxlm_loss)
if loss is not None:
agg_loss += cxlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# tlm step
loss, sample_size, logging_output = criterion(model, sample["tlm"], mlm=True)
if ignore_grad: loss *= 0
tlm_loss = loss
optimizer.backward(tlm_loss)
agg_loss += tlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# mlm_step
loss, sample_size, logging_output = criterion(model, sample["mlm"], mlm=True)
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
# agg_logging_output.update(logging_output)
for key, value in logging_output.items():
agg_logging_output[key] += value
# print("DEBUG2: %s" % str(agg_logging_output))
agg_logging_output["momentum"] = self._cur_momentum
return agg_loss, agg_sample_size, agg_logging_output
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
tlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.xlco_data, "train.%d" % sid)
xlco_dataset = get_xlco_dataset(
args, dataset_path, self.dictionary, self.mask_idx, combine=False)
dataset = DictDataset({
"tlm": tlm_dataset,
"mlm": mlm_dataset,
"xlco": xlco_dataset
})
# NOTE Set dataset epoch as sid for different random state
# of each shard, because when local indices are the same, the
# random states are the same.
dataset.set_epoch(sid)
self.datasets[split] = dataset
def load_xlco_dataset(self, split, epoch=0, combine=False, **kwargs):
args = self.args
dataset_path = os.path.join(args.xlco_data, "train.0")
xlco_dataset = get_xlco_dataset(
args, dataset_path, self.dictionary, self.mask_idx)
return xlco_dataset
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/tasks/infoxlm.py |
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.models.roberta import (
RobertaModel,
RobertaEncoder,
roberta_base_architecture,
roberta_large_architecture,
)
@register_model("reload_roberta")
class ReloadRoberta(RobertaModel):
@staticmethod
def add_args(parser):
RobertaModel.add_args(parser)
parser.add_argument('--roberta-model-path', type=str, default="")
@classmethod
def build_model(cls, args, task):
reload_roberta_base(args)
if not hasattr(args, 'max_positions'):
args.max_positions = args.tokens_per_sample
encoder = RobertaEncoder(args, task.source_dictionary)
model = cls(args, encoder)
if args.roberta_model_path != "":
state = checkpoint_utils.load_checkpoint_to_cpu(args.roberta_model_path)
model.load_state_dict(state["model"], strict=True, args=args)
print(model.__class__)
return model
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs):
raise NotImplementedError
  # NOTE Workaround for the `size` method of dataset classes:
  # examples are filtered during preprocessing,
  # so we do not need to filter them again here.
def max_positions(self):
"""Maximum length supported by the model."""
return None
@register_model_architecture("reload_roberta", "reload_roberta_base")
def reload_roberta_base(args):
roberta_base_architecture(args)
@register_model_architecture("reload_roberta", "reload_roberta_large")
def reload_roberta_large(args):
roberta_large_architecture(args)
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/models/roberta.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.models.roberta import (
RobertaModel,
roberta_base_architecture,
roberta_large_architecture
)
from fairseq.modules import LayerNorm
from infoxlm.models.roberta import ReloadRoberta, reload_roberta_base, RobertaEncoder
@register_model("xlm_align")
class XlmAlignModel(ReloadRoberta):
@staticmethod
def add_args(parser):
ReloadRoberta.add_args(parser)
parser.add_argument('--no_linear_proj', default=False, action='store_true')
def __init__(self, args, encoder):
super().__init__(args, encoder)
if args.no_linear_proj:
self.q_linear = self.k_linear = lambda x: x
else:
self.q_linear = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim,)
self.k_linear = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim,)
@classmethod
def build_model(cls, args, task):
reload_roberta_base(args)
if not hasattr(args, 'max_positions'):
args.max_positions = args.tokens_per_sample
encoder = RobertaEncoder(args, task.source_dictionary)
model = cls(args, encoder)
if args.roberta_model_path != "":
state = checkpoint_utils.load_checkpoint_to_cpu(args.roberta_model_path)
model.load_state_dict(state["model"], strict=False, args=args)
print(model.__class__)
return model
@register_model_architecture("xlm_align", "xlm_align_base")
def xlm_align_base(args):
roberta_base_architecture(args)
@register_model_architecture("xlm_align", "xlm_align_large")
def xlm_align_large(args):
roberta_large_architecture(args)
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/models/xlm_align.py |
import argparse
import importlib
import os
from fairseq.models import MODEL_REGISTRY, ARCH_MODEL_INV_REGISTRY
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('infoxlm.models.' + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group('Named architectures')
group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
group_args = parser.add_argument_group('Additional command-line arguments')
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + '_parser'] = parser
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/models/__init__.py |
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.models.roberta import (
RobertaModel,
roberta_base_architecture,
roberta_large_architecture
)
from infoxlm.utils import concat_all_gather
def build_projection_dict(langs, dim, activation_fn, fp16=False):
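  # Build a language->index map plus two projection tensors: a frozen "slow"
  # matrix and a trainable "fast" matrix initialized to the same values.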
proj_dict = {}
cnt = 0
for lang in langs:
proj_dict[lang] = cnt
cnt += 1
proj_matrix_slow = torch.randn(cnt, dim, dim)
proj_matrix_slow.normal_(mean=0.0, std=0.02)
proj_matrix_slow = nn.Parameter(proj_matrix_slow, requires_grad=False)
proj_matrix_fast = nn.Parameter(proj_matrix_slow.data.clone(), requires_grad=True)
return proj_dict, proj_matrix_fast, proj_matrix_slow
@register_model("infoxlm")
class InfoXlmModel(BaseFairseqModel):
def __init__(self, model_fast, model_slow, queue, proj=None):
super().__init__()
self.model_slow:nn.Module = model_slow
self.model_fast:nn.Module = model_fast
self.use_proj = False
self.share_proj = True
self.queue_size = queue.size(0)
self.register_buffer("queue", queue)
self.register_buffer("enqueue_cnt", torch.zeros(1, dtype=torch.long))
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
if proj is not None:
self.use_proj = True
self.proj_dict, proj_matrix_fast, proj_matrix_slow = proj
# if "share_lang" in self.proj_dict: self.share_proj = True
assert "share_lang" in self.proj_dict
self.register_parameter("proj_matrix_fast", proj_matrix_fast)
self.register_parameter("proj_matrix_slow", proj_matrix_slow)
for param in self.model_slow.parameters():
param.requires_grad = False
@staticmethod
def add_args(parser):
parser.add_argument('--roberta-model-path', type=str, default="")
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--max-positions', type=int,
help='number of positional embeddings to learn')
parser.add_argument('--activation-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN')
parser.add_argument('--use_proj', default=False, action='store_true')
def is_queue_ready(self):
return int(self.enqueue_cnt) >= self.queue_size
@torch.no_grad()
def update_queue(self, k):
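    # Enqueue keys gathered from all workers into a fixed-size circular
    # buffer, wrapping around to the front when the batch does not fit.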
k = concat_all_gather(k)
batch_size = k.size(0)
ptr = int(self.queue_ptr)
# assert self.queue_size % batch_size == 0
if ptr + batch_size <= self.queue_size:
self.queue[ptr:ptr+batch_size, :] = k
ptr = (ptr + batch_size) % self.queue_size
else:
left_len = self.queue_size - ptr
self.queue[ptr:, :] = k[:left_len, :]
ptr = batch_size-left_len
self.queue[:ptr, :] = k[left_len:, :]
self.queue_ptr[0] = ptr
self.enqueue_cnt += batch_size
@classmethod
def build_model(cls, args, task):
model_fast = RobertaModel.build_model(args, task)
model_slow = RobertaModel.build_model(args, task)
if args.roberta_model_path != "":
state = checkpoint_utils.load_checkpoint_to_cpu(args.roberta_model_path)
model_fast.load_state_dict(state["model"], strict=True, args=args)
model_slow.load_state_dict(state["model"], strict=True, args=args)
else:
model_slow.load_state_dict(model_fast.state_dict(), strict=True, args=args)
proj = None
if args.use_proj:
      # NOTE always use the shared projection head (share_lang)
langs = ["share_lang"]
proj = build_projection_dict(langs, args.encoder_embed_dim, args.activation_fn, args.fp16)
if "xlco_queue_size" in args:
xlco_queue_size = args.xlco_queue_size
else: xlco_queue_size = 1
print("xlco_queue_size is set as %d" % xlco_queue_size, flush=True)
queue = torch.randn(xlco_queue_size, args.encoder_embed_dim)
return cls(model_fast, model_slow, queue, proj=proj)
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt',
data_name_or_path='.', bpe='sentencepiece', **kwargs):
raise NotImplementedError
def forward(self, src_tokens, use_model_fast=True, **kwargs):
forward_model = self.model_fast if use_model_fast else self.model_slow
return forward_model(src_tokens, **kwargs)
def forward_proj(self, rep, lang, use_model_fast=True, **kwargs):
proj_matrix = self.proj_matrix_fast if use_model_fast else self.proj_matrix_slow
if self.share_proj: lang = "share_lang"
if isinstance(lang, str):
return torch.mm(rep, proj_matrix[self.proj_dict[lang],:,:])
else:
proj_indices = [self.proj_dict[l] for l in lang]
batch_rep = rep.unsqueeze(1)
return torch.bmm(batch_rep, proj_matrix[proj_indices,:,:])[:,0,:]
def output_layer(self, features, use_model_fast=True, **kwargs):
forward_model = self.model_fast if use_model_fast else self.model_slow
return forward_model.decoder.output_layer(features, **kwargs)
@torch.no_grad()
def update_slow_weight(self, momentum):
for p1, p2 in zip(self.model_fast.parameters(), self.model_slow.parameters()):
assert p2.requires_grad == False
new_p2_data = p2.data * momentum + p1.data * (1. - momentum)
p2.data.copy_(new_p2_data)
if self.use_proj:
p1 = self.proj_matrix_fast.data
p2 = self.proj_matrix_slow.data
assert p2.requires_grad == False
new_p2_data = p2.data * momentum + p1.data * (1. - momentum)
p2.data.copy_(new_p2_data)
@register_model_architecture("infoxlm", "infoxlm_base")
def infoxlm_base(args):
roberta_base_architecture(args)
@register_model_architecture("infoxlm", "infoxlm_large")
def infoxlm_large(args):
roberta_large_architecture(args)
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/models/infoxlm.py |
import torch
from fairseq.data import FairseqDataset
class TLMDataset(FairseqDataset):
def __init__(self, src_dataset, tgt_dataset, bos, eos):
assert len(src_dataset) == len(tgt_dataset)
self.src_dataset = src_dataset
self.tgt_dataset = tgt_dataset
self.bos = bos
self.eos = eos
self._sizes = src_dataset.sizes + tgt_dataset.sizes
def __len__(self):
return len(self.src_dataset)
@property
def sizes(self):
return self._sizes
def __getitem__(self, index):
src_item = self.src_dataset[index]
tgt_item = self.tgt_dataset[index]
return torch.cat([
src_item.new([self.bos]), src_item, src_item.new([self.eos]),
tgt_item, tgt_item.new([self.eos]),
])
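# ---------------------------------------------------------------------------
# Illustration only: the layout produced by __getitem__ for a toy pair, using
# plain tensors instead of real fairseq datasets. With bos=0 and eos=2 the
# source [4, 5] and target [6, 7, 8] are packed as
#   <s> src </s> tgt </s>  ->  [0, 4, 5, 2, 6, 7, 8, 2]
# which is the translation language modeling (TLM) input format.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  bos, eos = 0, 2
  src_item = torch.tensor([4, 5])
  tgt_item = torch.tensor([6, 7, 8])
  packed = torch.cat([
    src_item.new([bos]), src_item, src_item.new([eos]),
    tgt_item, tgt_item.new([eos]),
  ])
  print(packed.tolist())  # [0, 4, 5, 2, 6, 7, 8, 2]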
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/data/tlm_dataset.py |
import torch
from fairseq.data import BaseWrapperDataset
from fairseq.data import (data_utils,
TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset,
NumelDataset, NumSamplesDataset, NestedDictionaryDataset,
MaskTokensDataset, AppendTokenDataset, )
from infoxlm.data.mlm_utils import get_mlm_dataset, get_prepended_token_block_dataset
def get_mlm_dataset_with_offset(args, dataset_path, vocab, mask_idx, mask_whole_words=None, combine=False):
ptb_dataset = get_prepended_token_block_dataset(
args, dataset_path, vocab, combine=combine)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
ptb_dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
mask_whole_words=mask_whole_words,
)
dataset = NestedDictionaryDataset(
{
'net_input': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
'offsets': OffsetDataset(ptb_dataset, vocab),
},
sizes=[src_dataset.sizes],
)
return dataset
class OffsetDataset(BaseWrapperDataset):
def __init__(self, ptb_dataset, vocab):
super().__init__(ptb_dataset)
self.vocab = vocab
def get_check_ptb_offsets(self, ptb_item):
# parse ptb_item
eos_idx = self.vocab.eos()
bos_idx = self.vocab.bos()
_nonzero = (ptb_item == eos_idx).nonzero()
if len(_nonzero) != 2:
# raise ValueError
      # NOTE WORKAROUND: if the block does not contain exactly two </s>,
      # treat everything after the first </s> as the target side
_nonzero_0 = _nonzero[0].item()
_nonzero_1 = len(ptb_item)
else:
_nonzero_0 = _nonzero[0].item()
_nonzero_1 = _nonzero[1].item()
assert ptb_item[0].item() == bos_idx, (ptb_item[0].item(), bos_idx)
src_fr = 1
    # use the (possibly fallback) offsets computed above
    src_to = _nonzero_0
    trg_fr = src_to + 1
    trg_to = _nonzero_1
# print("ptb_item:")
# print(ptb_item)
# print("offsets:")
# print("%d %d %d %d" % (src_fr, src_to, trg_fr, trg_to))
# print("4 items: %d %d %d %d" % tuple(ptb_item[i].item() for i in [src_fr, src_to, trg_fr, trg_to]))
if src_to - src_fr <= 0 or trg_to - trg_fr <= 0:
print("[W] ptb_item=%s offsets=%d,%d,%d,%d" % (
str(ptb_item), src_fr, src_to, trg_fr, trg_to,
))
# raise ValueError
return src_fr, src_to, trg_fr, trg_to
def __getitem__(self, index):
ptb_item = self.dataset[index]
return self.get_check_ptb_offsets(ptb_item)
def collater(self, samples):
src_fr = [s[0] for s in samples]
src_to = [s[1] for s in samples]
trg_fr = [s[2] for s in samples]
trg_to = [s[3] for s in samples]
return src_fr, src_to, trg_fr, trg_to | data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/data/offset_dataset.py |
import torch
from fairseq.data import (data_utils,
TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset,
NumelDataset, NumSamplesDataset, NestedDictionaryDataset,
MaskTokensDataset, AppendTokenDataset, )
from fairseq.data.encoders.utils import get_whole_word_mask
def get_mlm_dataset(args, dataset_path, vocab, mask_idx, mask_whole_words=None, combine=False):
ptb_dataset = get_prepended_token_block_dataset(
args, dataset_path, vocab, combine=combine)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
ptb_dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
mask_whole_words=mask_whole_words,
)
dataset = NestedDictionaryDataset(
{
'net_input': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
# 'lang_id': RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
return dataset
def add_mlm_args(parser):
parser.add_argument('--mask-whole-words', default=False, action='store_true',
help='mask whole words; you may also want to set --bpe')
parser.add_argument('--mask-prob', default=0.15, type=float,
help='probability of replacing a token with mask')
parser.add_argument('--leave-unmasked-prob', default=0.1, type=float,
help='probability that a masked token is unmasked')
parser.add_argument('--random-token-prob', default=0.1, type=float,
help='probability of replacing a token with a random token')
parser.add_argument('--sample-break-mode', default='complete',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
def get_preprocessed_ptb_dataset(args, dataset_path, vocab, combine=False):
dataset = data_utils.load_indexed_dataset(
dataset_path, vocab, args.dataset_impl, combine=combine, )
if dataset is None:
raise FileNotFoundError('Dataset not found: ({})'.format(dataset_path))
return dataset
def get_prepended_token_block_dataset(args, dataset_path, vocab, combine=False):
dataset = data_utils.load_indexed_dataset(
dataset_path, vocab, args.dataset_impl, combine=combine, )
if dataset is None:
raise FileNotFoundError('Dataset not found: ({})'.format(dataset_path))
if not args.apply_ptb:
print("| [I] ptb not applied.", flush=True)
return dataset
dataset = TruncateDataset(dataset, args.tokens_per_sample - 1)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
args.tokens_per_sample - 1, # one less for <s>
pad=vocab.pad(),
eos=vocab.eos(),
break_mode=args.sample_break_mode,
)
print('| loaded {} blocks from: {}'.format(len(dataset), dataset_path), flush=True)
dataset = PrependTokenDataset(dataset, vocab.bos())
return dataset
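# ---------------------------------------------------------------------------
# Illustration only: a simplified, self-contained sketch of the corruption
# scheme that MaskTokensDataset.apply_mask applies to the blocks built above,
# using the defaults from add_mlm_args (15% of tokens selected; of those,
# 10% left unchanged, 10% replaced by a random token, the rest by <mask>).
# Vocabulary size, mask index and pad index below are made-up toy values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  torch.manual_seed(0)
  vocab_size, mask_idx, pad_idx = 100, 99, 1
  tokens = torch.randint(5, vocab_size - 1, (20,))
  mask_prob, leave_unmasked_prob, random_token_prob = 0.15, 0.1, 0.1
  select = torch.rand(tokens.shape) < mask_prob
  action = torch.rand(tokens.shape)
  src = tokens.clone()
  src[select & (action < 1.0 - leave_unmasked_prob - random_token_prob)] = mask_idx
  rand_pos = select & (action >= 1.0 - random_token_prob)
  src[rand_pos] = torch.randint(5, vocab_size - 1, (int(rand_pos.sum()),))
  target = tokens.clone()
  target[~select] = pad_idx  # only selected positions contribute to the MLM loss
  print("input :", src.tolist())
  print("target:", target.tolist())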
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/data/mlm_utils.py |
import torch
from fairseq.data import (data_utils,
TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset,
NumelDataset, NumSamplesDataset, NestedDictionaryDataset,
MaskTokensDataset, AppendTokenDataset, )
from fairseq.data.encoders.utils import get_whole_word_mask
from infoxlm.data.mlm_utils import get_prepended_token_block_dataset
from infoxlm.data.offset_dataset import OffsetDataset
def get_xlm_align_dataset_with_mask(args, dataset_path, vocab, mask_idx, combine=False):
ptb_dataset = get_prepended_token_block_dataset(
args, dataset_path, vocab, combine=combine)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
ptb_dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
)
dataset = NestedDictionaryDataset({
'net_input': {
'src_tokens': PadDataset(
ptb_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(ptb_dataset, reduce=False),
},
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(ptb_dataset, reduce=True),
'offsets': OffsetDataset(ptb_dataset, vocab),
'net_input_tlm': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
}, sizes=[ptb_dataset.sizes])
return dataset | data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/data/xlm_align.py |
import numpy as np
import os
import torch
from threading import Thread
from fairseq.data import data_utils, FairseqDataset, FairseqIterableDataset
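# The two wrappers below expose a dict that maps a name to a fairseq dataset
# as a single dataset: indexing (or iterating) yields a dict with one item per
# key, and collater() delegates each key to the wrapped dataset's own collater.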
class DictIterDataset(FairseqIterableDataset):
def __init__(self, defn, sizes=None):
self.defn = defn
for v in self.defn.values():
if not isinstance(v, (FairseqIterableDataset, )):
raise ValueError('Expected Dataset but found: {}'.format(v.__class__))
def set_epoch(self, epoch):
for ds in self.defn.values():
ds.set_epoch(epoch)
def __iter__(self):
iters = {key:iter(self.defn[key]) for key in self.defn}
while True:
try:
yield {key:next(iters[key]) for key in iters}
except StopIteration:
break
def __len__(self):
return min(len(v) for v in self.defn.values())
def collater(self, samples):
if len(samples) == 0:
return {}
sample = {}
for k, ds in self.defn.items():
sample[k] = ds.collater([s[k] for s in samples])
return sample
class DictDataset(FairseqDataset):
def __init__(self, defn, sizes=None):
self.defn = defn
for v in self.defn.values():
if not isinstance(v, (FairseqDataset, )):
raise ValueError('Expected Dataset but found: {}'.format(v.__class__))
def set_epoch(self, epoch):
for ds in self.defn.values():
ds.set_epoch(epoch)
def __getitem__(self, index):
ret = {key:self.defn[key][index] for key in self.defn}
return ret
def __len__(self):
return min(len(v) for v in self.defn.values())
def collater(self, samples):
if len(samples) == 0:
return {}
sample = {}
for k, ds in self.defn.items():
sample[k] = ds.collater([s[k] for s in samples])
# DEBUG
# print(sample)
return sample
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/data/dict_dataset.py |
data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/data/__init__.py |
|
import numpy as np
import torch
from fairseq.data import data_utils, FairseqDataset, MaskTokensDataset, TruncateDataset, BaseWrapperDataset
from infoxlm.data.dict_dataset import DictDataset
def get_xlco_dataset(args, dataset_path, vocab, mask_idx, combine=False):
dataset = data_utils.load_indexed_dataset(
dataset_path, vocab, args.dataset_impl, combine=combine)
dataset, _ = MaskTokensDataset.apply_mask(
dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
mask_whole_words=None,
)
dataset = XlcoDataset(dataset, vocab)
return dataset
class XlcoDataset(FairseqDataset):
def __init__(self, dataset, vocab, remove_bos_of_item2=True, seed=1):
# dataset: pair -> (line i, line i + 1) where i % 2 == 0
self.dataset = dataset
self.vocab = vocab
self.remove_bos_of_item2 = remove_bos_of_item2
self.seed = seed
self.epoch = 0
def set_epoch(self, epoch):
self.epoch = epoch
if hasattr(self.dataset, 'set_epoch'):
self.dataset.set_epoch(epoch)
def __len__(self):
return len(self.dataset) // 4
# NOTE mix-up contrast
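  # Each example mixes two consecutive translation pairs (4 lines of the
  # underlying dataset). `mode` is a 3-bit flag drawn per example: bit 0 swaps
  # the two source halves, bit 1 swaps the two target halves, and bit 2 swaps
  # the roles of the concatenated source and target sequences.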
def __getitem__(self, index):
src_item1 = self.dataset[index*4]
tgt_item1 = self.dataset[index*4+1]
src_item2 = self.dataset[index*4+2]
tgt_item2 = self.dataset[index*4+3]
with data_utils.numpy_seed(self.seed, self.epoch, index):
mode = np.random.randint(8)
if mode & 1: src_item1, src_item2 = src_item2, src_item1
if mode & 2: tgt_item1, tgt_item2 = tgt_item2, tgt_item1
bos = self.vocab.bos()
if self.remove_bos_of_item2 and src_item2[0] == bos:
src_item2 = src_item2[1:]
if self.remove_bos_of_item2 and tgt_item2[0] == bos:
tgt_item2 = tgt_item2[1:]
src_item = torch.cat([src_item1, src_item2])
tgt_item = torch.cat([tgt_item1, tgt_item2])
if mode & 4: src_item, tgt_item = tgt_item, src_item
return {
'id': index,
'source': src_item,
'target': tgt_item,
}
def collater(self, samples):
if len(samples) == 0:
return {}
pad_idx = self.vocab.pad()
eos_idx = self.vocab.eos()
def merge(key, left_pad, move_eos_to_beginning=False):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
)
id = torch.LongTensor([s['id'] for s in samples])
src_tokens = merge('source', left_pad=False)
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
tgt_tokens = merge('target', left_pad=False)
tgt_lengths = torch.LongTensor([s['target'].numel() for s in samples])
n_src_tokens = sum(len(s['source']) for s in samples)
n_tgt_tokens = sum(len(s['target']) for s in samples)
batch = {
'id': id,
'nsentences': len(samples),
'ntokens': n_src_tokens + n_tgt_tokens,
'src_net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
},
# NOTE the Roberta forward function takes src_tokens as input
'tgt_net_input': {
'src_tokens': tgt_tokens,
'src_lengths': tgt_lengths,
},
}
return batch | data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/data/xlco_dataset.py |
import collections
import logging
import math
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch import distributed
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.data.data_utils import process_bpe_symbol
from infoxlm.utils import _get_logging_loss, construct_idx_tensor_from_list
@register_criterion('dwa_mlm_tlm')
class DwaMlmTlm(FairseqCriterion):
IGNORE_INDEX = 1000000
def __init__(self, args, task):
super().__init__(args, task)
self.padding_idx = self.task.dictionary.pad_index
@staticmethod
def add_args(parser):
parser.add_argument('--no_tlm_loss', default=False, action='store_true')
def forward_mlm(self, model, sample, reduce=True, dep_rep_size=3):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
# (Rare case) When all tokens are masked, the model results in empty
# tensor and gives CUDA error.
if sample_size == 0:
masked_tokens = None
# logger.warning(str(sample["net_input"]["src_tokens"]))
# logger.warning("index - " + str(sample["net_input"]["src_tokens"].max()))
# logger.warning("len - " + str(sample["net_input"]["src_lengths"].max()))
features, _ = model(**sample['net_input'], use_model_fast=True, features_only=True)
logits = model.output_layer(features, masked_tokens=masked_tokens, use_model_fast=True)
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
# loss could be FloatTensor caused by deprecated functional method
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_loss = utils.item(loss.data) if reduce else loss.data
logging_output = {
'mlm_loss': logging_loss,
'mlm_ntokens': sample['ntokens'],
'mlm_nsentences': sample['nsentences'],
'mlm_sample_size': sample_size,
}
    # NOTE WORKAROUND: every parameter has to take part in the forward pass,
    # otherwise DDP complains about unused parameters.
hidden_sz = features.size(-1)
if hasattr(model, "qa_layer"):
dep_rep = features.new(hidden_sz * dep_rep_size).fill_(0)
dep_rep = model.qa_layer(dep_rep)
loss += dep_rep.mean() * 0.0
if hasattr(model, "q_linear"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
if hasattr(model, "predictor"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.predictor(dep_rep)
loss += dep_rep.mean() * 0.0
return loss, sample_size, logging_output
def forward_tlm(self, model, sample, reduce=True, dep_rep_size=3, net_input_key="net_input_tlm"):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
# (Rare case) When all tokens are masked, the model results in empty
# tensor and gives CUDA error.
if sample_size == 0:
masked_tokens = None
# logger.warning(str(sample["net_input"]["src_tokens"]))
# logger.warning("index - " + str(sample["net_input"]["src_tokens"].max()))
# logger.warning("len - " + str(sample["net_input"]["src_lengths"].max()))
features, _ = model(**sample[net_input_key], use_model_fast=True, features_only=True)
logits = model.output_layer(features, masked_tokens=masked_tokens, use_model_fast=True)
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
# loss could be FloatTensor caused by deprecated functional method
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_loss = utils.item(loss.data) if reduce else loss.data
logging_output = {
'tlm_loss': logging_loss,
'tlm_ntokens': sample['ntokens'],
'tlm_nsentences': sample['nsentences'],
'tlm_sample_size': sample_size,
}
    # NOTE WORKAROUND: every parameter has to take part in the forward pass,
    # otherwise DDP complains about unused parameters.
hidden_sz = features.size(-1)
if hasattr(model, "qa_layer"):
dep_rep = features.new(hidden_sz * dep_rep_size).fill_(0)
dep_rep = model.qa_layer(dep_rep)
loss += dep_rep.mean() * 0.0
if hasattr(model, "q_linear"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
if hasattr(model, "predictor"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.predictor(dep_rep)
loss += dep_rep.mean() * 0.0
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True, aligned_tokens=None, mlm=False, tlm=False):
if mlm:
return self.forward_mlm(model, sample, reduce, dep_rep_size=2)
elif tlm:
return self.forward_tlm(model, sample, reduce, dep_rep_size=2, net_input_key="net_input_tlm")
else:
return self.forward_denoise_word_alignment(model, sample, reduce, aligned_tokens, use_tlm_loss=(not self.args.no_tlm_loss))
def forward_masked_lm(self, features, tlm_targets, model):
masked_tokens = tlm_targets.ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
if sample_size == 0: masked_tokens = None
logits = model.output_layer(features, masked_tokens=masked_tokens)
targets = tlm_targets
if sample_size != 0: targets = targets[masked_tokens]
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_output = {
'tlm_loss': _get_logging_loss(loss),
'tlm_sample_size': sample_size,
}
return loss, sample_size, logging_output
def _positions2masked_features(self, positions, features, hidden_sz):
# bsz, max_num_spans
    # NOTE padded entries hold IGNORE_INDEX; replace them with 0 so they are
    # valid indices for gather
positions4gather = positions.clone().detach()
positions4gather[positions==DwaMlmTlm.IGNORE_INDEX] = 0
# bsz, max_num_spans -> bsz, max_num_spans, hidden
positions4gather = positions4gather.unsqueeze(-1).expand(-1, -1, hidden_sz)
masked_features = features.gather(dim=1, index=positions4gather)
return masked_features
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
# loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
reduced_log = collections.defaultdict(float)
# TODO sa EM & F1
reduced_keys = ["sa_loss", 'sa_EM', 'sa_EM_tot', 'sa_nsentences', 'sa_ntokens', 'sa_sample_size', "tlm_loss", "tlm_sample_size", "mlm_ntokens", "mlm_nsentences", "mlm_sample_size", "mlm_loss"]
for log in logging_outputs:
for key in reduced_keys:
reduced_log[key] += log.get(key, 0)
eps = 1e-7
sa_sample_size = reduced_log["sa_sample_size"]
sa_loss = reduced_log["sa_loss"] / (sa_sample_size + eps) / math.log(2)
tlm_sample_size = reduced_log["tlm_sample_size"]
tlm_loss = reduced_log["tlm_loss"] / (tlm_sample_size + eps) / math.log(2)
mlm_sample_size = reduced_log["mlm_sample_size"]
mlm_loss = reduced_log["mlm_loss"] / (mlm_sample_size + eps) / math.log(2)
sample_size = sa_sample_size + tlm_sample_size + mlm_sample_size
loss = (reduced_log["sa_loss"] + reduced_log["tlm_loss"] + reduced_log["mlm_loss"]) / (sample_size + eps) / math.log(2)
    # WORKAROUND: avoid division by zero when no alignment targets were produced
if reduced_log["sa_EM_tot"] < 1: reduced_log["sa_EM_tot"] = 1
agg_output = {
'loss': loss,
'ntokens': reduced_log["sa_ntokens"] + reduced_log["mlm_ntokens"],
'nsentences': reduced_log["sa_nsentences"] + reduced_log["mlm_nsentences"],
'dwa_loss': sa_loss,
'dwa_sample_size': sa_sample_size,
'dwa_EM': 0 if reduced_log["sa_EM_tot"] == 0 else 100 * reduced_log["sa_EM"] / reduced_log["sa_EM_tot"],
'mlm_loss': mlm_loss,
'mlm_sample_size': mlm_sample_size,
'tlm_loss': tlm_loss,
'tlm_sample_size': tlm_sample_size,
'sample_size': sample_size,
}
# DEBUG
# for k, v in agg_output.items():
# print("%s: %.2f" % (k, v), end=" | ")
# print("")
return agg_output
def construct_tensor_from_list(self, idx_list2d, lens, pad_idx, device=None):
max_len = max(lens)
padded_list = [list_i + [pad_idx] * (max_len - lens[i]) for i, list_i in enumerate(idx_list2d)]
tensor = torch.LongTensor(padded_list)
if device is not None:
tensor = tensor.to(device=device)
return tensor
def prepare_positions(self, sample, aligned_tokens, device=None):
masked_tokens = sample['target'].ne(self.padding_idx)
bsz = masked_tokens.size(0)
src_fr, src_to, trg_fr, trg_to = sample["offsets"]
# NOTE aligned_tokens should be extracted from the jointly encoded representations
align_dicts = []
for tokens_i in aligned_tokens:
dict_i = {}
for src, trg in tokens_i:
dict_i[src] = trg
dict_i[trg] = src
align_dicts.append(dict_i)
positions_fwd = [[] for i in range(bsz)]
positions_bwd = [[] for i in range(bsz)]
masked_positions_fwd = [[] for i in range(bsz)]
masked_positions_bwd = [[] for i in range(bsz)]
pos_cnt_fwd = [0] * bsz
pos_cnt_bwd = [0] * bsz
for ij in masked_tokens.nonzero():
i = ij[0].item()
masked_j = ij[1].item()
if masked_j not in align_dicts[i]: continue
aligned_j = align_dicts[i][masked_j]
if src_fr[i] <= masked_j < src_to[i] and trg_fr[i] <= aligned_j < trg_to[i]:
masked_positions_fwd[i].append(masked_j)
positions_fwd[i].append(aligned_j)
pos_cnt_fwd[i] += 1
elif src_fr[i] <= aligned_j < src_to[i] and trg_fr[i] <= masked_j < trg_to[i]:
masked_positions_bwd[i].append(masked_j)
positions_bwd[i].append(aligned_j)
pos_cnt_bwd[i] += 1
else:
print("[W] Value Error of alignments!!!")
continue
positions_fwd = self.construct_tensor_from_list(positions_fwd, pos_cnt_fwd, DwaMlmTlm.IGNORE_INDEX, device=device)
positions_bwd = self.construct_tensor_from_list(positions_bwd, pos_cnt_bwd, DwaMlmTlm.IGNORE_INDEX, device=device)
masked_positions_fwd = self.construct_tensor_from_list(masked_positions_fwd, pos_cnt_fwd, DwaMlmTlm.IGNORE_INDEX, device=device)
masked_positions_bwd = self.construct_tensor_from_list(masked_positions_bwd, pos_cnt_bwd, DwaMlmTlm.IGNORE_INDEX, device=device)
return positions_fwd, positions_bwd, masked_positions_fwd, masked_positions_bwd
def forward_denoise_word_alignment(self, model, sample, reduce=True, aligned_tokens=None, use_tlm_loss=True):
src_fr, src_to, trg_fr, trg_to = sample["offsets"]
features, _ = model(**sample["net_input_tlm"], features_only=True)
device = features.device
positions_fwd, positions_bwd, masked_positions_fwd, masked_positions_bwd = \
self.prepare_positions(sample, aligned_tokens, device=device)
if use_tlm_loss:
tlm_loss, tlm_sample_size, tlm_logging_output = self.forward_masked_lm(
features, sample["target"], model)
fwd_loss, fwd_em_cnt, fwd_tot = self.get_token_align_loss(model, features, positions_fwd, masked_positions_fwd, trg_fr, trg_to)
bwd_loss, bwd_em_cnt, bwd_tot = self.get_token_align_loss(model, features, positions_bwd, masked_positions_bwd, src_fr, src_to)
loss = fwd_loss + bwd_loss
em_cnt = fwd_em_cnt + bwd_em_cnt
tot = fwd_tot + bwd_tot
em = 0 if tot == 0 else 100.0 * em_cnt / tot
sample_size = tot
logging_output = {
'sa_loss': _get_logging_loss(loss),
'sa_EM': em_cnt,
'sa_EM_tot': tot,
'sa_nsentences': sample["nsentences"],
'sa_ntokens': sample["ntokens"],
'sa_sample_size': sample_size,
}
if use_tlm_loss:
loss += tlm_loss
sample_size += tlm_sample_size
logging_output.update(tlm_logging_output)
else:
hidden_sz = features.size(-1)
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.output_layer(dep_rep, features_only=True)
loss += dep_rep.mean() * 0.0
if hasattr(model, "forward_proj"):
hidden_sz = features.size(-1)
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.forward_proj(dep_rep[None, :], "en", use_model_fast=True)
loss += dep_rep.mean() * 0.0
return loss, sample_size, logging_output
def get_token_align_loss(self, model, features, positions, masked_positions, fr, to):
if len(positions.view(-1)) <= 0:
dep_rep = features[0, 0, :]
loss = dep_rep.mean() * 0.0
em_cnt = tot = 0
return loss, em_cnt, tot
bsz, seq_len, hidden_sz = features.size()
# _, max_num_spans = positions.size()
device = features.device
# get attention mask
fr_tensor = torch.LongTensor(fr).to(device=device)
to_tensor = torch.LongTensor(to).to(device=device)
# bsz, seq_len
attention_mask = (torch.arange(seq_len)[None, :].to(device=device) >= fr_tensor[:, None]) & (torch.arange(seq_len)[None, :].to(device=device) < to_tensor[:, None])
# bsz, 1, seq_len
attention_mask = attention_mask[:, None, :]
attention_mask = (1.0-attention_mask.half()) * -1e4
# print(attention_mask)
# masked_features: bsz, max_num_spans, hidden
masked_features = self._positions2masked_features(masked_positions, features, hidden_sz)
q_features = model.q_linear(masked_features)
# bsz, len, hidden
k_features = model.k_linear(features)
# bsz, max_num_spans, len
attention_scores = torch.matmul(q_features, k_features.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(hidden_sz)
attention_scores = attention_scores + attention_mask
logits = attention_scores
loss_fct = nn.CrossEntropyLoss(ignore_index=DwaMlmTlm.IGNORE_INDEX, reduction='sum')
loss = loss_fct(logits.view(-1, logits.size(-1)), positions.view(-1))
# calc EM & F1
def _get_em_mask(logits, targets):
logits = logits.view(-1, logits.size(-1))
targets = targets.view(-1)
prediction = logits.argmax(dim=-1)
return targets == prediction, (targets != DwaMlmTlm.IGNORE_INDEX).sum().item()
em_mask, tot = _get_em_mask(logits, positions)
em_cnt = em_mask.sum().item()
return loss, em_cnt, tot
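# ---------------------------------------------------------------------------
# Illustration only: a toy, self-contained sketch of the scoring used in
# get_token_align_loss above. A masked source position is projected with a
# query head, every position of the joint sequence with a key head, and the
# aligned target position is predicted by a scaled dot product restricted to
# the target span via an additive mask. The two Linear layers stand in for
# model.q_linear / model.k_linear; everything is fp32 and randomly
# initialized, so this shows shapes and masking only, not trained behaviour.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  torch.manual_seed(0)
  bsz, seq_len, hidden = 2, 10, 16
  features = torch.randn(bsz, seq_len, hidden)
  q_linear, k_linear = nn.Linear(hidden, hidden), nn.Linear(hidden, hidden)
  masked_positions = torch.tensor([[2], [4]])  # one masked source token per example
  trg_fr, trg_to = torch.tensor([5, 6]), torch.tensor([10, 10])  # target spans
  masked_feat = features.gather(
    1, masked_positions.unsqueeze(-1).expand(-1, -1, hidden))
  q = q_linear(masked_feat)   # bsz, 1, hidden
  k = k_linear(features)      # bsz, seq_len, hidden
  scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(hidden)
  arange = torch.arange(seq_len)[None, :]
  in_span = (arange >= trg_fr[:, None]) & (arange < trg_to[:, None])
  scores = scores + (1.0 - in_span[:, None, :].float()) * -1e4
  print("predicted aligned positions:", scores.argmax(dim=-1).squeeze(1).tolist())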
| data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/criterions/xlm_align.py |
import os
import importlib
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('infoxlm.criterions.' + module) | data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/criterions/__init__.py |
import collections
import logging
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch import distributed
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
logger = logging.getLogger(__name__)
@register_criterion('xlco')
class XlCoCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
parser.add_argument('--xlco_queue_size', default=256, type=int)
parser.add_argument('--xlco_softmax_tau', default=0.25, type=float)
parser.add_argument('--xlco_layer', default=8, type=int)
parser.add_argument('--xlco_lambda', default=1.0, type=float)
def __init__(self, args, task):
super().__init__(args, task)
self.criterion = nn.CrossEntropyLoss(reduction='sum')
self.xlco_queue_size = args.xlco_queue_size
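  # InfoNCE objective: for each query q the matching key k (produced by the
  # momentum encoder in forward_xlco) is the positive, every vector currently
  # stored in the queue is a negative, and the (N, 1 + queue_size) logits are
  # scaled by --xlco_softmax_tau before cross-entropy against label 0, i.e.
  # the positive always sits in column 0.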
def contrastive_loss(self, q, k, queue):
queue = queue.clone().detach()
N, C = q.size()
assert k.size() == (N,C), (N, C, k.size())
logits_pos = torch.bmm(q.view(N, 1, C), k.view(N, C, 1)).view(N, 1)
logits_neg = torch.mm(q, queue.transpose(0, 1))
logits = torch.cat([logits_pos, logits_neg], dim=1) / self.args.xlco_softmax_tau
labels = torch.zeros(N).cuda().long()
loss = self.criterion(logits, labels)
cxlm_ncorrect = utils.item((logits.argmax(dim=1) == labels).sum())
return loss, cxlm_ncorrect
def _get_logging_loss(self, loss, reduce=True):
if loss is None: return 0
return utils.item(loss.data) if reduce else loss.data
def forward_xlco(self, model, sample, reduce=True):
cxlm_head_key = "share_lang"
with torch.no_grad():
_, inner_states = model(**sample['tgt_net_input'], use_model_fast=False, features_only=True, return_all_hiddens=True)
slow_features = inner_states["inner_states"][self.args.xlco_layer]
slow_features = slow_features[0, :, :].clone().detach()
if self.args.use_proj:
slow_rep = model.forward_proj(
slow_features, cxlm_head_key, use_model_fast=False)
else: slow_rep = slow_features
if model.is_queue_ready():
fast_features, inner_states = model(**sample['src_net_input'],
use_model_fast=True, features_only=True, return_all_hiddens=True)
fast_features = inner_states["inner_states"][-1][0, :, :]
fast_features8 = inner_states["inner_states"][self.args.xlco_layer][0, :, :]
if self.args.use_proj:
fast_rep = model.forward_proj(
fast_features8, cxlm_head_key, use_model_fast=True)
else: fast_rep = fast_features8
cxlm_loss, cxlm_ncorrect = self.contrastive_loss(fast_rep, slow_rep, model.queue)
cxlm_loss *= self.task.xlco_lambda
loss = cxlm_loss
      # NOTE WORKAROUND: every parameter has to take part in the forward pass,
      # otherwise DDP complains about unused parameters.
dep_logits = model.output_layer(fast_features, features_only=True)
loss += dep_logits.mean() * 0.0
if hasattr(model, "q_linear"):
hidden_sz = fast_features.size(-1)
dep_rep = fast_features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
cxlm_logging_loss = self._get_logging_loss(cxlm_loss, reduce)
else:
loss = None
cxlm_logging_loss = 0
cxlm_ncorrect = 0
if model.training:
rank = self.args.distributed_rank
model.update_queue(slow_rep)
sample_size = sample["nsentences"]
logging_output = {
'cxlm_loss': cxlm_logging_loss,
'cxlm_nsentences': sample["nsentences"],
'cxlm_ntokens': sample["ntokens"],
'cxlm_sample_size': sample_size,
'cxlm_ncorrect': cxlm_ncorrect,
}
return loss, sample_size, logging_output
def forward_mlm(self, model, sample, reduce=True):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
# (Rare case) When all tokens are masked, the model results in empty
# tensor and gives CUDA error.
if sample_size == 0:
masked_tokens = None
features, _ = model(**sample['net_input'], use_model_fast=True, features_only=True)
logits = model.output_layer(features, masked_tokens=masked_tokens, use_model_fast=True)
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
# loss could be FloatTensor caused by deprecated functional method
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_loss = utils.item(loss.data) if reduce else loss.data
logging_output = {
'mlm_loss': logging_loss,
'mlm_ntokens': sample['ntokens'],
'mlm_nsentences': sample['nsentences'],
'mlm_sample_size': sample_size,
}
    # NOTE WORKAROUND: every parameter has to take part in the forward pass,
    # otherwise DDP complains about unused parameters.
if self.args.use_proj:
dep_rep = model.forward_proj(features[:, 0, :], "en", use_model_fast=True)
loss += dep_rep.mean() * 0.0
if hasattr(model, "q_linear"):
hidden_sz = features.size(-1)
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True, mlm=False):
if mlm:
return self.forward_mlm(model, sample, reduce=reduce)
else:
return self.forward_xlco(model, sample, reduce=reduce)
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
# loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
reduced_log = collections.defaultdict(float)
reduced_keys = ["cxlm_loss", "mlm_loss", "cxlm_ntokens",
"cxlm_nsentences", "mlm_ntokens", "mlm_nsentences", "cxlm_sample_size",
"mlm_sample_size", "cxlm_ncorrect", "momentum"]
for log in logging_outputs:
for key in reduced_keys:
reduced_log[key] += log.get(key, 0)
loss_sum_cxlm = reduced_log["cxlm_loss"]
loss_sum_mlm = reduced_log["mlm_loss"]
loss_sum = loss_sum_cxlm + loss_sum_mlm
cxlm_ntokens = reduced_log["cxlm_ntokens"]
cxlm_nsentences = reduced_log["cxlm_nsentences"]
mlm_ntokens = reduced_log["mlm_ntokens"]
mlm_nsentences = reduced_log["mlm_nsentences"]
cxlm_sample_size = reduced_log["cxlm_sample_size"]
mlm_sample_size = reduced_log["mlm_sample_size"]
sample_size = cxlm_sample_size + mlm_sample_size
ncorrect = reduced_log["cxlm_ncorrect"]
eps = 1e-7
agg_output = {
'loss': loss_sum / (sample_size + eps) / math.log(2),
'ntokens': cxlm_ntokens + mlm_ntokens,
'nsentences': cxlm_nsentences + mlm_nsentences,
'xlco_loss': loss_sum_cxlm / (cxlm_sample_size + eps) / math.log(2),
'mlm_loss': loss_sum_mlm / (mlm_sample_size + eps) / math.log(2),
'xlco_accuracy': 100.0 * ncorrect / (cxlm_nsentences + eps),
'momentum': reduced_log["momentum"] / len(logging_outputs),
'xlco_ntokens': cxlm_ntokens,
'xlco_nsentences': cxlm_nsentences,
'mlm_ntokens': mlm_ntokens,
'mlm_nsentences': mlm_nsentences,
'xlco_sample_size': cxlm_sample_size,
'mlm_sample_size': mlm_sample_size,
'sample_size': sample_size,
}
# DEBUG
# for k, v in agg_output.items():
# print("%s: %f" % (k, v), end=" | ")
# print("")
return agg_output | data2vec_vision-main | infoxlm/src-infoxlm/infoxlm/criterions/xlco.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
import site
# PEP517 workaround
site.ENABLE_USER_SITE = True
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
)
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
)
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core==1.0.6",
"importlib-resources==4.1.1",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
"torch",
"tqdm",
"sentencepiece",
"omegaconf==2.0.6", #"langid"
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.exists(fairseq_examples):
os.unlink(fairseq_examples)
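# Typical usage, assuming the build requirements from setup_requires are
# available (both commands are run from this directory):
#   pip install -e .                       # editable install, builds the extensions
#   python setup.py build_ext --inplace    # rebuild only the C++/Cython extensions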
| data2vec_vision-main | deltalm/src/setup.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
| data2vec_vision-main | deltalm/src/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import importlib
from fairseq.hub_utils import ( # noqa; noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
dependencies = [
"dataclasses",
"hydra",
"numpy",
"regex",
"requests",
"torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
try:
importlib.import_module(dep)
except ImportError:
# Hack: the hydra package is provided under the "hydra-core" name in
# pypi. We don't want the user mistakenly calling `pip install hydra`
# since that will install an unrelated package.
if dep == "hydra":
dep = "hydra-core"
missing_deps.append(dep)
if len(missing_deps) > 0:
raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast # noqa
except ImportError:
try:
import cython # noqa
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), "setup.py"),
["build_ext", "--inplace"],
)
except ImportError:
print(
"Unable to build Cython components. Please make sure Cython is "
"installed if the torch.hub model you are loading depends on it."
)
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
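# The entries registered above are consumed through torch.hub, e.g. (repo and
# model name are placeholders -- any key returned by hub_models() of a
# registered model works):
#   model = torch.hub.load('<github_user/repo_containing_this_file>', '<hub_model_name>')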
| data2vec_vision-main | deltalm/src/hubconf.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import tempfile
import unittest
import torch
from fairseq.data import Dictionary
class TestDictionary(unittest.TestCase):
def test_finalize(self):
txt = [
"A B C D",
"B C D",
"C D",
"D",
]
ref_ids1 = list(
map(
torch.IntTensor,
[
[4, 5, 6, 7, 2],
[5, 6, 7, 2],
[6, 7, 2],
[7, 2],
],
)
)
ref_ids2 = list(
map(
torch.IntTensor,
[
[7, 6, 5, 4, 2],
[6, 5, 4, 2],
[5, 4, 2],
[4, 2],
],
)
)
# build dictionary
d = Dictionary()
for line in txt:
d.encode_line(line, add_if_not_exist=True)
def get_ids(dictionary):
ids = []
for line in txt:
ids.append(dictionary.encode_line(line, add_if_not_exist=False))
return ids
def assertMatch(ids, ref_ids):
for toks, ref_toks in zip(ids, ref_ids):
self.assertEqual(toks.size(), ref_toks.size())
self.assertEqual(0, (toks != ref_toks).sum().item())
ids = get_ids(d)
assertMatch(ids, ref_ids1)
# check finalized dictionary
d.finalize()
finalized_ids = get_ids(d)
assertMatch(finalized_ids, ref_ids2)
# write to disk and reload
with tempfile.NamedTemporaryFile(mode="w") as tmp_dict:
d.save(tmp_dict.name)
d = Dictionary.load(tmp_dict.name)
reload_ids = get_ids(d)
assertMatch(reload_ids, ref_ids2)
assertMatch(finalized_ids, reload_ids)
def test_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999 #fairseq:overwrite\n"
"<s> 999 #fairseq:overwrite\n"
"</s> 999 #fairseq:overwrite\n"
", 999\n"
"▁de 999\n"
)
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index("<pad>"), 1)
self.assertEqual(d.index("foo"), 3)
self.assertEqual(d.index("<unk>"), 4)
self.assertEqual(d.index("<s>"), 5)
self.assertEqual(d.index("</s>"), 6)
self.assertEqual(d.index(","), 7)
self.assertEqual(d.index("▁de"), 8)
def test_no_overwrite(self):
# for example, Camembert overwrites <unk>, <s> and </s>
dict_file = io.StringIO(
"<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n"
)
d = Dictionary()
with self.assertRaisesRegex(RuntimeError, "Duplicate"):
d.add_from_file(dict_file)
def test_space(self):
# for example, character models treat space as a symbol
dict_file = io.StringIO(" 999\n" "a 999\n" "b 999\n")
d = Dictionary()
d.add_from_file(dict_file)
self.assertEqual(d.index(" "), 4)
self.assertEqual(d.index("a"), 5)
self.assertEqual(d.index("b"), 6)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_dictionary.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import random
import unittest
from multiprocessing import Manager
import torch
import torch.nn as nn
from fairseq import distributed_utils, optim
from omegaconf import OmegaConf
class Model(nn.Module):
def __init__(self, input_size, output_size):
super(Model, self).__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, input):
output = self.fc(input)
return output
def setup_model_loss_criterion(cfg, args, rank, is_cuda):
"""
setup model, criterion and optimizer based on input args
"""
args.distributed_rank = rank
cfg.distributed_training.distributed_rank = args.distributed_rank
if cfg.distributed_training.distributed_world_size > 1:
distributed_utils.distributed_init(cfg)
torch.manual_seed(1)
model = Model(args.input_size, args.nb_classes)
loss_fn = nn.CrossEntropyLoss()
if is_cuda:
model = model.cuda()
loss_fn = loss_fn.cuda()
optimizer = optim.sgd.SGD(args, model.parameters())
optimizer = optim.FairseqBMUF(
cfg=cfg.bmuf,
optimizer=optimizer
)
return model, loss_fn, optimizer
def train_step(input, target, model, loss_fn, optimizer, **unused):
"""Do forward, backward and parameter update."""
model.train()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
optimizer.step()
def single_gpu_training(cfg, args, rank, iterations, shared_results):
is_cuda = torch.cuda.is_available()
if is_cuda:
torch.cuda.set_device(rank)
model, loss_fn, optimizer = setup_model_loss_criterion(cfg, args, rank, is_cuda)
for _ in range(iterations):
input = torch.randn(1, args.input_size)
target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
if is_cuda:
input = input.cuda()
target = target.cuda()
train_step(input, target, model, loss_fn, optimizer)
results = []
for param in model.parameters():
if len(results) == 0:
results = param.flatten().cpu().data
else:
results = torch.cat((results, param.flatten().cpu().data), 0)
shared_results[rank] = results
def setup_args():
args = argparse.Namespace()
args.global_sync_iter = 20
args.block_momentum = 0.875
args.block_lr = 0.5
args.input_size = 5
args.nb_classes = 2
args.batch_size = 1
args.lr = [1e-3]
args.momentum = 0
args.weight_decay = 0
args.warmup_iterations = 0
args.use_nbm = True
args.average_sync = True
args.global_sync_iter = 1
args.model_parallel_size = 1
args.distributed_backend = "gloo"
args.distributed_world_size = 2
port = random.randint(10000, 20000)
args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
args.distributed_init_host = "localhost"
args.distributed_port = port + 1
args.local_world_size = args.distributed_world_size
cfg = OmegaConf.create()
cfg.optimization = OmegaConf.create()
cfg.common = OmegaConf.create()
cfg.distributed_training = OmegaConf.create()
cfg.dataset = OmegaConf.create()
cfg.bmuf = OmegaConf.create()
cfg.optimizer = OmegaConf.create()
cfg.bmuf.global_sync_iter = args.global_sync_iter
cfg.bmuf.block_momentum = args.block_momentum
cfg.bmuf.block_lr = args.block_lr
cfg.dataset.batch_size = args.batch_size
cfg.optimization.lr = args.lr
cfg.optimizer.momentum = args.momentum
cfg.optimizer.weight_decay = args.weight_decay
cfg.bmuf.warmup_iterations = args.warmup_iterations
cfg.bmuf.use_nbm = args.use_nbm
cfg.bmuf.average_sync = args.average_sync
cfg.common.model_parallel_size = args.model_parallel_size
cfg.distributed_training.distributed_backend = args.distributed_backend
cfg.distributed_training.distributed_world_size = args.distributed_world_size
cfg.bmuf.distributed_world_size = args.distributed_world_size
cfg.distributed_training.distributed_init_method = args.distributed_init_method
cfg.distributed_training.distributed_port = args.distributed_port
return cfg, args
@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):
def bmuf_process(self, cfg, args, iterations):
processes = []
results = Manager().dict()
ctx = torch.multiprocessing.get_context("spawn")
for rank in range(args.distributed_world_size):
p = ctx.Process(
target=single_gpu_training, args=(cfg, args, rank, iterations, results)
)
p.start()
processes.append(p)
for p in processes:
p.join()
return results
def test_bmuf_sync(self):
# Train model for 1 iteration and do bmuf sync without doing warmup
cfg, args = setup_args()
iterations = 1
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_warmup_sync(self):
# Train model for 20 iteration and do warmup sync without doing bmuf sync
cfg, args = setup_args()
args.warmup_iterations = 20
cfg.bmuf.warmup_iterations = args.warmup_iterations
iterations = 20
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_warmup_sync_bmuf_sync(self):
# Train model for 25 iteration and do warmup sync after 20 iteration
# and bmuf sync after 25 iteration
cfg, args = setup_args()
args.warmup_iterations = 20
args.global_sync_iter = 5
cfg.bmuf.warmup_iterations = args.warmup_iterations
cfg.bmuf.global_sync_iter = args.global_sync_iter
iterations = 25
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_single_gpu_bmuf(self):
# Train model for 5 iterations and use GPU 1
cfg, args = setup_args()
args.distributed_world_size = 1
args.warmup_iterations = 5
cfg.distributed_training.distributed_world_size = args.distributed_world_size
cfg.bmuf.distributed_world_size = args.distributed_world_size
cfg.bmuf.warmup_iterations = args.warmup_iterations
iterations = 20
results = self.bmuf_process(cfg, args, iterations)
assert len(results) == 1
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_bmuf.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq import utils
class TestUtils(unittest.TestCase):
def test_convert_padding_direction(self):
pad = 1
left_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 7, 8, 9, 10],
[1, 1, 1, 11, 12],
]
)
right_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[7, 8, 9, 10, 1],
[11, 12, 1, 1, 1],
]
)
self.assertAlmostEqual(
right_pad,
utils.convert_padding_direction(
left_pad,
pad,
left_to_right=True,
),
)
self.assertAlmostEqual(
left_pad,
utils.convert_padding_direction(
right_pad,
pad,
right_to_left=True,
),
)
def test_make_positions(self):
pad = 1
left_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[1, 9, 9, 9, 9],
[1, 1, 1, 9, 9],
]
)
left_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 2, 3, 4, 5],
[1, 1, 1, 2, 3],
]
)
right_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 1],
[9, 9, 1, 1, 1],
]
)
right_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[2, 3, 4, 5, 1],
[2, 3, 1, 1, 1],
]
)
self.assertAlmostEqual(
left_pad_output,
utils.make_positions(left_pad_input, pad),
)
self.assertAlmostEqual(
right_pad_output,
utils.make_positions(right_pad_input, pad),
)
def test_clip_grad_norm_(self):
params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, 0.0)
params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
for p in params:
p.grad = torch.full((5,), fill_value=2.0)
grad_norm = utils.clip_grad_norm_(params, 1.0)
exp_grad_norm = torch.full((15,), fill_value=2.0).norm()
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, exp_grad_norm)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertAlmostEqual(grad_norm, torch.tensor(1.0))
def test_resolve_max_positions_with_tuple(self):
resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
self.assertEqual(resolved, (2000, 100, 2000))
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from fairseq.data import iterators
class TestIterators(unittest.TestCase):
def test_counting_iterator(self, ref=None, itr=None):
if ref is None:
assert itr is None
ref = list(range(10))
itr = iterators.CountingIterator(ref)
else:
assert len(ref) == 10
assert itr is not None
self.assertTrue(itr.has_next())
self.assertEqual(itr.n, 0)
self.assertEqual(next(itr), ref[0])
self.assertEqual(itr.n, 1)
self.assertEqual(next(itr), ref[1])
self.assertEqual(itr.n, 2)
itr.skip(3)
self.assertEqual(itr.n, 5)
self.assertEqual(next(itr), ref[5])
itr.skip(3)
self.assertEqual(itr.n, 9)
self.assertEqual(next(itr), ref[9])
self.assertFalse(itr.has_next())
def test_grouped_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.GroupedIterator(x, 1)
self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])
itr = iterators.GroupedIterator(x, 4)
self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
itr = iterators.GroupedIterator(x, 5)
self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
# test CountingIterator functionality
x = list(range(30))
ref = list(iterators.GroupedIterator(x, 3))
itr = iterators.GroupedIterator(x, 3)
self.test_counting_iterator(ref, itr)
def test_sharded_iterator(self):
# test correctness
x = list(range(10))
itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0)
self.assertEqual(list(itr), x)
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0)
self.assertEqual(list(itr), [0, 2, 4, 6, 8])
itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1)
self.assertEqual(list(itr), [1, 3, 5, 7, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.assertEqual(list(itr), [0, 3, 6, 9])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1)
self.assertEqual(list(itr), [1, 4, 7, None])
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2)
self.assertEqual(list(itr), [2, 5, 8, None])
# test CountingIterator functionality
x = list(range(30))
ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0))
itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
self.test_counting_iterator(ref, itr)
def test_counting_iterator_take(self):
ref = list(range(10))
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
itr = iterators.CountingIterator(ref)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
def test_counting_iterator_buffered_iterator_take(self):
ref = list(range(10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(itr), len(list(iter(itr))))
self.assertEqual(len(itr), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(len(buffered_itr), 5)
self.assertEqual(len(list(iter(buffered_itr))), 5)
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr)
itr.take(5)
self.assertEqual(next(itr), ref[0])
self.assertEqual(next(itr), ref[1])
itr.skip(2)
self.assertEqual(next(itr), ref[4])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
ref = list(range(4, 10))
buffered_itr = iterators.BufferedIterator(2, ref)
itr = iterators.CountingIterator(buffered_itr, start=4)
itr.take(5)
self.assertEqual(len(itr), 5)
self.assertEqual(len(buffered_itr), 1)
self.assertEqual(next(itr), ref[0])
self.assertFalse(itr.has_next())
self.assertRaises(StopIteration, next, buffered_itr)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_iterators.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict, List
import tests.utils as test_utils
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
TransformEosDataset,
data_utils,
noising,
)
class TestDataNoising(unittest.TestCase):
def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with continuation markers as suffixes to denote
non-end of word tokens. This is the standard BPE format used in
fairseq's preprocessing.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he@@")
vocab.add_symbol("llo")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("y@@")
vocab.add_symbol("ou")
vocab.add_symbol("n@@")
vocab.add_symbol("ew")
vocab.add_symbol("or@@")
vocab.add_symbol("k")
src_tokens = [
["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
["how", "are", "y@@", "ou"],
]
        x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_bpe_end_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with end-of-word markers as suffixes to denote
tokens at the end of a word. This is an alternative to fairseq's
standard preprocessing framework and is not generally supported
within fairseq.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
            src_lengths: source sentence lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he")
vocab.add_symbol("llo_EOW")
vocab.add_symbol("how_EOW")
vocab.add_symbol("are_EOW")
vocab.add_symbol("y")
vocab.add_symbol("ou_EOW")
vocab.add_symbol("n")
vocab.add_symbol("ew_EOW")
vocab.add_symbol("or")
vocab.add_symbol("k_EOW")
src_tokens = [
["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
["how_EOW", "are_EOW", "y", "ou_EOW"],
]
        x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_word_vocab(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: word vocab
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
            src_lengths: source sentence lengths.
"""
vocab = Dictionary()
vocab.add_symbol("hello")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("you")
vocab.add_symbol("new")
vocab.add_symbol("york")
src_tokens = [
["hello", "new", "york", "you"],
["how", "are", "you", "new", "york"],
]
x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _convert_src_tokens_to_tensor(
self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
):
src_len = [len(x) for x in src_tokens]
# If we have to append EOS, we include EOS in counting src length
if append_eos:
src_len = [length + 1 for length in src_len]
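        # Build a (batch x max_len) tensor pre-filled with the pad index, then
        # overwrite it with the numberized tokens (plus EOS when requested).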
x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
for i in range(len(src_tokens)):
for j in range(len(src_tokens[i])):
x[i][j] = vocab.index(src_tokens[i][j])
if append_eos:
x[i][j + 1] = vocab.eos()
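        # Transpose to a time-major (T x B) layout; the noising helpers and the
        # assertions in this test index tokens as x[time][batch].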
x = x.transpose(1, 0)
return x, torch.LongTensor(src_len)
def assert_eos_at_end(self, x, x_len, eos):
"""Asserts last token of every sentence in x is EOS """
for i in range(len(x_len)):
self.assertEqual(
x[x_len[i] - 1][i],
eos,
(
"Expected eos (token id {eos}) at the end of sentence {i} "
"but got {other} instead"
).format(i=i, eos=eos, other=x[i][-1]),
)
def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
# Expect only the first word (2 bpe tokens) of the first example
# was dropped out
self.assertEqual(x_len[0] - 2, l_noised[0])
for i in range(l_noised[0]):
self.assertEqual(x_noised[i][0], x[i + 2][0])
def test_word_dropout_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
# Expect only the first word (2 bpe tokens) of the first example
# was blanked out
self.assertEqual(x_len[0], l_noised[0])
for i in range(l_noised[0]):
if i < 2:
self.assertEqual(x_noised[i][0], unk)
else:
self.assertEqual(x_noised[i][0], x[i][0])
def test_word_blank_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def generate_unchanged_shuffle_map(self, length):
return {i: i for i in range(length)}
def assert_word_shuffle_matches_expected(
self,
x,
x_len,
max_shuffle_distance: int,
vocab: Dictionary,
expected_shufle_maps: List[Dict[int, int]],
expect_eos_at_end: bool,
bpe_end_marker=None,
):
"""
This verifies that with a given x, x_len, max_shuffle_distance, and
vocab, we get the expected shuffle result.
Args:
x: Tensor of shape (T x B) = (sequence_length, batch_size)
x_len: Tensor of length B = batch_size
max_shuffle_distance: arg to pass to noising
expected_shuffle_maps: List[mapping] where mapping is a
Dict[old_index, new_index], mapping x's elements from their
old positions in x to their new positions in x.
expect_eos_at_end: if True, check the output to make sure there is
an EOS at the end.
bpe_end_marker: str denoting the BPE end token. If this is not None, we
set the BPE cont token to None in the noising classes.
"""
bpe_cont_marker = None
if bpe_end_marker is None:
bpe_cont_marker = "@@"
with data_utils.numpy_seed(1234):
word_shuffle = noising.WordShuffle(
vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
)
x_noised, l_noised = word_shuffle.noising(
x, x_len, max_shuffle_distance=max_shuffle_distance
)
# For every example, we have a different expected shuffle map. We check
# that each example is shuffled as expected according to each
# corresponding shuffle map.
for i in range(len(expected_shufle_maps)):
shuffle_map = expected_shufle_maps[i]
for k, v in shuffle_map.items():
self.assertEqual(x[k][i], x_noised[v][i])
# Shuffling should not affect the length of each example
for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
self.assertEqual(pre_shuffle_length, post_shuffle_length)
if expect_eos_at_end:
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_shuffle_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=True,
)
def test_word_shuffle_with_eos_nonbpe(self):
"""The purpose of this is to test shuffling logic with word vocabs"""
vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
{0: 0, 1: 1, 2: 3, 3: 2},
{0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
],
expect_eos_at_end=True,
)
def test_word_shuffle_without_eos(self):
"""Same result as word shuffle with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
)
def test_word_shuffle_without_eos_with_bpe_end_marker(self):
"""Same result as word shuffle without eos except using BPE end token"""
vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
def assert_no_eos_at_end(self, x, x_len, eos):
"""Asserts that the last token of each sentence in x is not EOS """
for i in range(len(x_len)):
self.assertNotEqual(
x[x_len[i] - 1][i],
eos,
"Expected no eos (token id {eos}) at the end of sentence {i}.".format(
eos=eos, i=i
),
)
def test_word_dropout_without_eos(self):
"""Same result as word dropout with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_blank_without_eos(self):
"""Same result as word blank with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def _get_noising_dataset_batch(
self,
src_tokens_no_pad,
src_dict,
append_eos_to_tgt=False,
):
"""
Constructs a NoisingDataset and the corresponding
``LanguagePairDataset(NoisingDataset(src), src)``. If
*append_eos_to_tgt* is True, wrap the source dataset in
:class:`TransformEosDataset` to append EOS to the clean source when
using it as the target.
"""
src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)
noising_dataset = noising.NoisingDataset(
src_dataset=src_dataset,
src_dict=src_dict,
seed=1234,
max_word_shuffle_distance=3,
word_dropout_prob=0.2,
word_blanking_prob=0.2,
noising_class=noising.UnsupervisedMTNoising,
)
tgt = src_dataset
language_pair_dataset = LanguagePairDataset(
src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
)
language_pair_dataset = TransformEosDataset(
language_pair_dataset,
src_dict.eos(),
append_eos_to_tgt=append_eos_to_tgt,
)
dataloader = torch.utils.data.DataLoader(
dataset=language_pair_dataset,
batch_size=2,
collate_fn=language_pair_dataset.collater,
)
denoising_batch_result = next(iter(dataloader))
return denoising_batch_result
def test_noising_dataset_with_eos(self):
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=True
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_noising_dataset_without_eos(self):
"""
Similar to test noising dataset with eos except that we have to set
*append_eos_to_tgt* to ``True``.
"""
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=False
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad,
src_dict=src_dict,
append_eos_to_tgt=True,
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_noising.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.lstm import LSTMModel
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
    for i in range(vocab_size):
        dummy_dict.add_symbol("{}".format(i), 1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
    To build a fairseq model we need a dummy parser and task. This function
    creates both to facilitate model/criterion tests.
    Note: we use DummyTask (defined above) as the dummy task. You may want to
    use another task by providing a different factory function.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
class TestJitLSTMModel(unittest.TestCase):
def _test_save_and_load(self, scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
def assertTensorEqual(self, t1, t2):
t1 = t1[~torch.isnan(t1)] # can cause size mismatch errors if there are NaNs
t2 = t2[~torch.isnan(t2)]
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
def test_jit_and_export_lstm(self):
task, parser = get_dummy_task_and_parser()
LSTMModel.add_args(parser)
args = parser.parse_args([])
args.criterion = ""
model = LSTMModel.build_model(args, task)
scripted_model = torch.jit.script(model)
self._test_save_and_load(scripted_model)
def test_assert_jit_vs_nonjit_(self):
task, parser = get_dummy_task_and_parser()
LSTMModel.add_args(parser)
args = parser.parse_args([])
args.criterion = ""
model = LSTMModel.build_model(args, task)
model.eval()
scripted_model = torch.jit.script(model)
scripted_model.eval()
idx = len(task.source_dictionary)
        num_iters = 100
        # Inject random input and check output
        seq_len_tensor = torch.randint(1, 10, (num_iters,))
        num_samples_tensor = torch.randint(1, 10, (num_iters,))
        for i in range(num_iters):
seq_len = seq_len_tensor[i]
num_samples = num_samples_tensor[i]
src_token = (torch.randint(0, idx, (num_samples, seq_len)),)
src_lengths = torch.randint(1, seq_len + 1, (num_samples,))
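            # Sort lengths in descending order (longest first), mirroring how
            # fairseq typically orders batches before the encoder.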
src_lengths, _ = torch.sort(src_lengths, descending=True)
# Force the first sample to have seq_len
src_lengths[0] = seq_len
prev_output_token = (torch.randint(0, idx, (num_samples, 1)),)
result = model(src_token[0], src_lengths, prev_output_token[0], None)
scripted_result = scripted_model(
src_token[0], src_lengths, prev_output_token[0], None
)
self.assertTensorEqual(result[0], scripted_result[0])
self.assertTensorEqual(result[1], scripted_result[1])
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_lstm_jitable.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
class TestSparseMultiheadAttention(unittest.TestCase):
def test_sparse_multihead_attention(self):
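        # The attention weight values themselves are irrelevant here; the test
        # only checks the sparse masks built for an 8 x 8 attention pattern.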
attn_weights = torch.randn(1, 8, 8)
bidirectional_sparse_mask = torch.tensor(
[
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
]
)
bidirectional_attention = SparseMultiheadAttention(
16, 1, stride=4, expressivity=1, is_bidirectional=True
)
bidirectional_attention_sparse_mask = (
bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
)
        self.assertTrue(
            torch.all(
                torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)
            )
        )
sparse_mask = torch.tensor(
[
[
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[
0,
0,
0,
0,
float("-inf"),
float("-inf"),
float("-inf"),
float("-inf"),
],
[0, 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf")],
[
float("-inf"),
float("-inf"),
float("-inf"),
0,
0,
0,
float("-inf"),
float("-inf"),
],
[
float("-inf"),
float("-inf"),
float("-inf"),
0,
0,
0,
0,
float("-inf"),
],
[float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0],
]
)
attention = SparseMultiheadAttention(
16, 1, stride=4, expressivity=1, is_bidirectional=False
)
attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
        self.assertTrue(torch.all(torch.eq(attention_sparse_mask, sparse_mask)))
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_sparse_multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import uuid
from fairseq import metrics
class TestMetrics(unittest.TestCase):
def test_nesting(self):
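        # Values logged inside a nested aggregation context also propagate to
        # the enclosing context, so the outer meter averages over both calls.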
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate() as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1.5)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_new_root(self):
with metrics.aggregate() as a:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as b:
metrics.log_scalar("loss", 2)
self.assertEqual(a.get_smoothed_values()["loss"], 1)
self.assertEqual(b.get_smoothed_values()["loss"], 2)
def test_nested_new_root(self):
with metrics.aggregate() as layer1:
metrics.log_scalar("loss", 1)
with metrics.aggregate(new_root=True) as layer2:
metrics.log_scalar("loss", 2)
with metrics.aggregate() as layer3:
metrics.log_scalar("loss", 3)
with metrics.aggregate(new_root=True) as layer4:
metrics.log_scalar("loss", 4)
metrics.log_scalar("loss", 1.5)
self.assertEqual(layer4.get_smoothed_values()["loss"], 4)
self.assertEqual(layer3.get_smoothed_values()["loss"], 3)
self.assertEqual(layer2.get_smoothed_values()["loss"], 2.5)
self.assertEqual(layer1.get_smoothed_values()["loss"], 1.25)
def test_named(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
metrics.log_scalar("loss", 3)
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 1.5)
def test_nested_duplicate_names(self):
name = str(uuid.uuid4())
metrics.reset_meters(name)
with metrics.aggregate(name):
metrics.log_scalar("loss", 1)
with metrics.aggregate() as other:
with metrics.aggregate(name):
metrics.log_scalar("loss", 2)
metrics.log_scalar("loss", 6)
self.assertEqual(metrics.get_smoothed_values(name)["loss"], 3)
self.assertEqual(other.get_smoothed_values()["loss"], 2)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_metrics.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch
import torch
from fairseq import checkpoint_utils, data
from omegaconf import OmegaConf
def mock_trainer(epoch, num_updates, iterations_in_epoch):
trainer = MagicMock()
trainer.load_checkpoint.return_value = {
"train_iterator": {
"epoch": epoch,
"iterations_in_epoch": iterations_in_epoch,
"shuffle": False,
},
}
trainer.get_num_updates.return_value = num_updates
return trainer
def mock_dict():
d = MagicMock()
d.pad.return_value = 1
d.eos.return_value = 2
d.unk.return_value = 3
return d
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
tokens_ds = data.TokenBlockDataset(
tokens,
sizes=[tokens.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
dataset = data.LanguagePairDataset(
tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False
)
epoch_itr = data.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=[[i] for i in range(epoch_size)],
)
return trainer, epoch_itr
def get_mock_cfg(finetune_from_model):
cfg_mock = OmegaConf.create(
{
"checkpoint": {
"optimizer_overrides": "{}",
"reset_dataloader": False,
"reset_meters": False,
"reset_optimizer": False,
"reset_lr_scheduler": False,
"finetune_from_model": finetune_from_model,
"model_parallel_size": 1,
},
"common": {
"model_parallel_size": 1,
},
}
)
return cfg_mock
class TestLoadCheckpoint(unittest.TestCase):
def setUp(self):
self.cfg_mock = get_mock_cfg(None)
self.patches = {
"os.makedirs": MagicMock(),
"os.path.join": MagicMock(),
"os.path.isfile": MagicMock(return_value=True),
"os.path.isabs": MagicMock(return_value=False),
"fairseq.file_io.PathManager.exists": MagicMock(return_value=False),
}
self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
[p.start() for p in self.applied_patches]
logging.disable(logging.CRITICAL)
def tearDown(self):
patch.stopall()
logging.disable(logging.NOTSET)
def test_load_partial_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
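            # Simulate resuming mid-epoch: epoch 2, 150 batches per epoch,
            # 50 batches already consumed (200 updates overall).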
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50)
self.assertEqual(epoch_itr.iterations_in_epoch, 51)
for _ in range(150 - 52):
next(itr)
self.assertEqual(epoch_itr.iterations_in_epoch, 149)
self.assertTrue(itr.has_next())
next(itr)
self.assertFalse(itr.has_next())
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertTrue(itr.has_next())
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
def test_load_full_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)
def test_load_no_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
self.patches["os.path.isfile"].return_value = False
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 1)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)
def test_finetune_from_model_args_conflict(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
for arg in [
"reset_optimizer",
"reset_lr_scheduler",
"reset_meters",
"reset_dataloader",
]:
with self.subTest(arg=arg):
cfg_mock = get_mock_cfg("/temp/checkpoint_pretrained.pt")
cfg_mock["checkpoint"][arg] = True
with self.assertRaises(Exception) as context:
_, _ = checkpoint_utils.load_checkpoint(
cfg_mock.checkpoint, trainer
)
self.assertTrue(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
in str(context.exception)
)
def test_finetune_from_model(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
from_model_path = "/temp/checkpoint_pretrained.pt"
def mock_finetune_exist(path):
if path == from_model_path:
return True
else:
return False
self.patches[
"fairseq.file_io.PathManager.exists"
].side_effect = mock_finetune_exist
cfg_mock = get_mock_cfg(from_model_path)
cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
_, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
) = trainer.load_checkpoint.call_args[0]
reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
self.assertTrue(reset_optimizer)
self.assertTrue(reset_lr_scheduler)
self.assertTrue(reset_meters)
def test_finetune_from_model_resume(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
from_model_path = "/temp/checkpoint_pretrained.pt"
# launch second time
# both restore_file=checkpoint_last.pt and finetune_from_model are set
def mock_finetune_exist(path):
                if path == from_model_path or path.endswith("checkpoint_last.pt"):
return True
else:
return False
self.patches[
"fairseq.file_io.PathManager.exists"
].side_effect = mock_finetune_exist
cfg_mock = get_mock_cfg(from_model_path)
cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
_, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
) = trainer.load_checkpoint.call_args[0]
reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
self.assertFalse(reset_optimizer)
self.assertFalse(reset_lr_scheduler)
self.assertFalse(reset_meters)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import tests.utils as test_utils
import torch
from fairseq.sequence_scorer import SequenceScorer
class TestSequenceScorer(unittest.TestCase):
def test_sequence_scorer(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
eos = d.eos()
w1 = 4
w2 = 5
# construct dataloader
data = [
{
"source": torch.LongTensor([w1, w2, eos]),
"target": torch.LongTensor([w1, w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, w1, eos]),
},
{
"source": torch.LongTensor([w2, eos]),
"target": torch.LongTensor([w2, eos]),
},
]
data_itr = test_utils.dummy_dataloader(data)
# specify expected output probabilities
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.6, 0.4], # sentence 1
[0.0, unk, 0.4, 0.6], # sentence 2
[0.0, unk, 0.7, 0.3], # sentence 3
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 0.2, 0.7], # sentence 1
[0.0, unk, 0.8, 0.2], # sentence 2
[0.7, unk, 0.1, 0.2], # sentence 3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[0.10, unk, 0.50, 0.4], # sentence 1
[0.15, unk, 0.15, 0.7], # sentence 2
[0.00, unk, 0.00, 0.0], # sentence 3
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2
[0.9, unk, 0.05, 0.05], # sentence 1
[0.0, unk, 0.00, 0.0], # sentence 2
[0.0, unk, 0.00, 0.0], # sentence 3
]
),
]
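        # Per-position probabilities the scorer should assign to each reference
        # target; assertHypoScore compares their logs with positional_scores.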
expected_scores = [
[0.6, 0.7, 0.5, 0.9], # sentence 1
[0.6, 0.8, 0.15], # sentence 2
[0.3, 0.7], # sentence 3
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
scorer = SequenceScorer(task.target_dictionary)
for sample in data_itr:
hypos = task.inference_step(scorer, [model], sample)
for id, hypos_id in zip(sample["id"].tolist(), hypos):
self.assertHypoTokens(hypos_id[0], data[id]["target"])
self.assertHypoScore(hypos_id[0], expected_scores[id])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_sequence_scorer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.transformer import TransformerModel
from tests.test_sequence_generator import get_dummy_task_and_parser
class TestInferenceDropout(unittest.TestCase):
def setUp(self):
self.task, self.parser = get_dummy_task_and_parser()
TransformerModel.add_args(self.parser)
self.args = self.parser.parse_args([])
self.args.encoder_layers = 2
self.args.decoder_layers = 1
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_sets_inference_dropout_to_true(self):
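        # With retain_dropout set, prepare_for_inference_ should leave every
        # dropout module active at inference time.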
self.args.retain_dropout = True
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert self.transformer_model.encoder.dropout_module.apply_during_inference
assert self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.encoder.layers:
assert layer.dropout_module.apply_during_inference
def test_inference_dropout_false_by_default(self):
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert not self.transformer_model.encoder.dropout_module.apply_during_inference
assert not self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.encoder.layers:
assert not layer.dropout_module.apply_during_inference
for layer in self.transformer_model.decoder.layers:
assert not layer.dropout_module.apply_during_inference
def test_applies_training_mode(self):
self.transformer_model = TransformerModel.build_model(self.args, self.task)
assert self.transformer_model.encoder.dropout_module.training
for layer in self.transformer_model.encoder.layers:
assert layer.dropout_module.training
self.transformer_model.eval()
assert not self.transformer_model.decoder.dropout_module.training
for layer in self.transformer_model.encoder.layers:
assert not layer.dropout_module.training
def test_retain_modules(self):
self.args.retain_dropout = True
self.args.retain_dropout_modules = [
"TransformerEncoder",
"TransformerEncoderLayer",
]
self.transformer_model = TransformerModel.build_model(self.args, self.task)
cfg = convert_namespace_to_omegaconf(self.args)
self.transformer_model.prepare_for_inference_(cfg)
assert self.transformer_model.encoder.dropout_module.apply_during_inference
assert not self.transformer_model.decoder.dropout_module.apply_during_inference
for layer in self.transformer_model.decoder.layers:
assert not layer.dropout_module.apply_during_inference
| data2vec_vision-main | deltalm/src/tests/test_inference_dropout.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.modules.multihead_attention import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
def test_append_prev_key_padding_mask(self):
bsz = 1
src_len = 4
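        # Each case is (current key_padding_mask, previous key_padding_mask,
        # expected combined mask after appending).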
cases = [
# no padding mask
(None, None, None),
# current padding mask only
(
torch.tensor([[1]]).bool(),
None,
torch.tensor([[0, 0, 0, 1]]).bool(),
),
# previous padding mask only
(
None,
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 0]]).bool(),
),
# both padding masks
(
torch.tensor([[1]]).bool(),
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 1]]).bool(),
),
]
for c in cases:
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
c[0],
c[1],
batch_size=bsz,
src_len=src_len,
static_kv=False,
)
if key_padding_mask is not None:
self.assertTrue(
torch.all(torch.eq(key_padding_mask, c[2])),
f"Unexpected resultant key padding mask: {key_padding_mask}"
f" given current: {c[0]} and previous: {c[1]}",
)
self.assertEqual(key_padding_mask.size(0), bsz)
self.assertEqual(key_padding_mask.size(1), src_len)
else:
self.assertIsNone(c[2])
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
import tempfile
import unittest
from io import StringIO
from fairseq import checkpoint_utils
from tests.utils import (
create_dummy_data,
preprocess_translation_data,
train_translation_model,
)
class TestCheckpointUtils(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@contextlib.contextmanager
def _train_transformer(self, seed, extra_args=None):
if extra_args is None:
extra_args = []
with tempfile.TemporaryDirectory(f"_train_transformer_seed{seed}") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"3",
"--decoder-layers",
"3",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--seed",
str(seed),
]
+ extra_args,
)
yield os.path.join(data_dir, "checkpoint_last.pt")
def test_load_model_ensemble_and_task(self):
with contextlib.redirect_stdout(StringIO()):
with self._train_transformer(seed=123) as model1:
with self._train_transformer(seed=456) as model2:
ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
filenames=[model1, model2]
)
self.assertEqual(len(ensemble), 2)
# after Transformer has been migrated to Hydra, this will probably
# become cfg.common.seed
self.assertEqual(ensemble[0].args.seed, 123)
self.assertEqual(ensemble[1].args.seed, 456)
# the task from the first model should be returned
self.assertEqual(task.args.seed, 123)
def test_prune_state_dict(self):
with contextlib.redirect_stdout(StringIO()):
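            # Train with LayerDrop enabled so that individual layers can be
            # pruned at load time via the *_layers_to_keep overrides below.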
extra_args = ["--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01"]
with self._train_transformer(seed=1, extra_args=extra_args) as model:
ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
filenames=[model],
arg_overrides={
"encoder_layers_to_keep": "0,2",
"decoder_layers_to_keep": "1",
},
)
self.assertEqual(len(ensemble), 1)
self.assertEqual(len(ensemble[0].encoder.layers), 2)
self.assertEqual(len(ensemble[0].decoder.layers), 1)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_checkpoint_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import unittest
import numpy as np
from fairseq.data import ListDataset, ResamplingDataset
class TestResamplingDataset(unittest.TestCase):
def setUp(self):
self.strings = ["ab", "c", "def", "ghij"]
self.weights = [4.0, 2.0, 7.0, 1.5]
self.size_ratio = 2
self.dataset = ListDataset(
self.strings, np.array([len(s) for s in self.strings])
)
def _test_common(self, resampling_dataset, iters):
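        # Run `iters` epochs, tracking (a) whether samples come back ordered by
        # size and (b) how far the empirical sample frequencies drift from the
        # distribution implied by the resampling weights.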
assert len(self.dataset) == len(self.strings) == len(self.weights)
assert len(resampling_dataset) == self.size_ratio * len(self.strings)
results = {"ordered_by_size": True, "max_distribution_diff": 0.0}
totalfreqs = 0
freqs = collections.defaultdict(int)
for epoch_num in range(iters):
resampling_dataset.set_epoch(epoch_num)
indices = resampling_dataset.ordered_indices()
assert len(indices) == len(resampling_dataset)
prev_size = -1
for i in indices:
cur_size = resampling_dataset.size(i)
# Make sure indices map to same sequences within an epoch
assert resampling_dataset[i] == resampling_dataset[i]
# Make sure length of sequence is correct
assert cur_size == len(resampling_dataset[i])
freqs[resampling_dataset[i]] += 1
totalfreqs += 1
if prev_size > cur_size:
results["ordered_by_size"] = False
prev_size = cur_size
assert set(freqs.keys()) == set(self.strings)
for s, weight in zip(self.strings, self.weights):
freq = freqs[s] / totalfreqs
expected_freq = weight / sum(self.weights)
results["max_distribution_diff"] = max(
results["max_distribution_diff"], abs(expected_freq - freq)
)
return results
def test_resampling_dataset_batch_by_size_false(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=False,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = False, the batches should be returned in
# arbitrary order of size.
assert not results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
def test_resampling_dataset_batch_by_size_true(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=True,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = True, the batches should be returned in
# increasing order of size.
assert results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_resampling_dataset.py |
data2vec_vision-main | deltalm/src/tests/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import tests.utils as test_utils
import torch
from fairseq.data import (
BacktranslationDataset,
LanguagePairDataset,
TransformEosDataset,
)
from fairseq.sequence_generator import SequenceGenerator
class TestBacktranslationDataset(unittest.TestCase):
def setUp(self):
(
self.tgt_dict,
self.w1,
self.w2,
self.src_tokens,
self.src_lengths,
self.model,
) = test_utils.sequence_generator_setup()
dummy_src_samples = self.src_tokens
self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)
self.cuda = torch.cuda.is_available()
def _backtranslation_dataset_helper(
self,
remove_eos_from_input_src,
remove_eos_from_output_src,
):
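        # Generate synthetic sources from the target-side data with a
        # SequenceGenerator, then collate them together with the original
        # targets to form backtranslation batches.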
tgt_dataset = LanguagePairDataset(
src=self.tgt_dataset,
src_sizes=self.tgt_dataset.sizes,
src_dict=self.tgt_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
)
generator = SequenceGenerator(
[self.model],
tgt_dict=self.tgt_dict,
max_len_a=0,
max_len_b=200,
beam_size=2,
unk_penalty=0,
)
backtranslation_dataset = BacktranslationDataset(
tgt_dataset=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# remove eos from the input src
remove_eos_from_src=remove_eos_from_input_src,
),
src_dict=self.tgt_dict,
backtranslation_fn=(
lambda sample: generator.generate([self.model], sample)
),
output_collater=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# if we remove eos from the input src, then we need to add it
# back to the output tgt
append_eos_to_tgt=remove_eos_from_input_src,
remove_eos_from_src=remove_eos_from_output_src,
).collater,
cuda=self.cuda,
)
dataloader = torch.utils.data.DataLoader(
backtranslation_dataset,
batch_size=2,
collate_fn=backtranslation_dataset.collater,
)
backtranslation_batch_result = next(iter(dataloader))
eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2
# Note that we sort by src_lengths and add left padding, so actually
# ids will look like: [1, 0]
expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
if remove_eos_from_output_src:
expected_src = expected_src[:, :-1]
expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
tgt_tokens = backtranslation_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_backtranslation_dataset_no_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False,
remove_eos_from_output_src=True,
)
def test_backtranslation_dataset_with_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False,
remove_eos_from_output_src=False,
)
def test_backtranslation_dataset_no_eos_in_input_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=True,
remove_eos_from_output_src=False,
)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_backtranslation_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
import random
import sys
import tempfile
import unittest
from io import StringIO
import torch
from fairseq import options
from fairseq_cli import eval_lm, train, validate
from tests.utils import (
create_dummy_data,
generate_main,
preprocess_lm_data,
preprocess_summarization_data,
preprocess_translation_data,
train_translation_model,
)
try:
import transformers # noqa
has_hf_transformers = True
except ImportError:
has_hf_transformers = False
class TestTranslation(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv(self):
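        # End-to-end smoke test: create tiny synthetic data, binarize it, train
        # a small fconv model, then run generation on the trained checkpoint.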
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, "fconv_iwslt_de_en")
generate_main(data_dir)
def test_raw(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--dataset-impl", "raw"])
train_translation_model(
data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"]
)
generate_main(data_dir, ["--dataset-impl", "raw"])
def test_update_freq(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_update_freq") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"]
)
generate_main(data_dir)
def test_max_positions(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_max_positions") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
with self.assertRaises(Exception) as context:
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
["--max-target-positions", "5"],
)
self.assertTrue(
"skip this example with --skip-invalid-size-inputs-valid-test"
in str(context.exception)
)
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--max-target-positions",
"5",
"--skip-invalid-size-inputs-valid-test",
],
)
with self.assertRaises(Exception) as context:
generate_main(data_dir)
generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"])
def test_generation(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_sampling") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, "fconv_iwslt_de_en")
generate_main(
data_dir,
[
"--sampling",
"--temperature",
"2",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--sampling",
"--sampling-topk",
"3",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--sampling",
"--sampling-topp",
"0.2",
"--beam",
"2",
"--nbest",
"2",
],
)
generate_main(
data_dir,
[
"--diversity-rate",
"0.5",
"--beam",
"6",
],
)
with self.assertRaises(ValueError):
generate_main(
data_dir,
[
"--diverse-beam-groups",
"4",
"--match-source-len",
],
)
generate_main(data_dir, ["--prefix-size", "2"])
generate_main(data_dir, ["--retain-dropout"])
def test_eval_bleu(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_eval_bleu") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--eval-bleu",
"--eval-bleu-print-samples",
"--eval-bleu-remove-bpe",
"--eval-bleu-detok",
"space",
"--eval-bleu-args",
'{"beam": 4, "min_len": 10}',
],
)
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lstm_wiseman_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_lstm_bidirectional(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lstm",
[
"--encoder-layers",
"2",
"--encoder-bidirectional",
"--encoder-hidden-size",
"16",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
"--decoder-layers",
"2",
],
)
generate_main(data_dir)
def test_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
run_validation=True,
)
generate_main(data_dir)
def test_transformer_with_activation_checkpointing(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--checkpoint-activations",
],
run_validation=True,
)
generate_main(data_dir)
def test_multilingual_transformer(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [
[],
["--encoder-langtok", "src"],
["--encoder-langtok", "tgt"],
]
decoder_langtok_flags = [[], ["--decoder-langtok"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(
f"test_multilingual_transformer_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
arch="multilingual_transformer",
task="multilingual_translation",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"multilingual_translation",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
@unittest.skipIf(sys.platform.lower() == "darwin", "skip latent depth test on MacOS")
def test_multilingual_translation_latent_depth(self):
# test with latent depth in encoder, decoder, or both
encoder_latent_layer = [[], ["--encoder-latent-layer"]]
decoder_latent_layer = [[], ["--decoder-latent-layer"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_latent_layer)):
for j in range(len(decoder_latent_layer)):
if i == 0 and j == 0:
continue
enc_ll_flag = encoder_latent_layer[i]
dec_ll_flag = decoder_latent_layer[j]
with tempfile.TemporaryDirectory(
f"test_multilingual_translation_latent_depth_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=["--joined-dictionary"]
)
train_translation_model(
data_dir,
arch="latent_multilingual_transformer",
task="multilingual_translation_latent_depth",
extra_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--share-encoders",
"--share-decoders",
"--sparsity-weight",
"0.1",
]
+ enc_ll_flag
+ dec_ll_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
]
+ enc_ll_flag
+ dec_ll_flag,
)
generate_main(
data_dir,
extra_flags=[
"--user-dir",
"examples/latent_depth/latent_depth_src",
"--task",
"multilingual_translation_latent_depth",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ll_flag
+ dec_ll_flag,
)
def test_translation_multi_simple_epoch(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [
[],
["--encoder-langtok", "src"],
["--encoder-langtok", "tgt"],
]
decoder_langtok_flags = [[], ["--decoder-langtok"]]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(
f"test_translation_multi_simple_epoch_{i}_{j}"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=["--joined-dictionary"]
)
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out,out-in"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out,out-in",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_no_vepoch(self):
# test with all combinations of encoder/decoder lang tokens
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=[]
)
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_translation_multi_simple_epoch_dicts(self):
# test with all combinations of encoder/decoder lang tokens
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ["--encoder-langtok", "src"]
dec_ltok_flag = ["--decoder-langtok"]
with tempfile.TemporaryDirectory(
"test_translation_multi_simple_epoch_dict"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(
data_dir, extra_flags=[]
)
train_translation_model(
data_dir,
arch="transformer",
task="translation_multi_simple_epoch",
extra_flags=[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--sampling-method",
"temperature",
"--sampling-temperature",
"1.5",
"--virtual-epoch-size",
"1000",
]
+ enc_ltok_flag
+ dec_ltok_flag,
lang_flags=["--lang-pairs", "in-out"],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
"--task",
"translation_multi_simple_epoch",
"--lang-pairs",
"in-out",
"--source-lang",
"in",
"--target-lang",
"out",
]
+ enc_ltok_flag
+ dec_ltok_flag,
)
def test_transformer_cross_self_attention(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_cross_self_attention"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--no-cross-attention",
"--cross-self-attention",
],
run_validation=True,
)
generate_main(data_dir, extra_flags=[])
def test_transformer_pointer_generator(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_transformer_pointer_generator"
) as data_dir:
create_dummy_data(data_dir)
preprocess_summarization_data(data_dir)
train_translation_model(
data_dir,
"transformer_pointer_generator",
extra_flags=[
"--user-dir",
"examples/pointer_generator/pointer_generator_src",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--alignment-layer",
"-1",
"--alignment-heads",
"1",
"--source-position-markers",
"0",
],
run_validation=True,
extra_valid_flags=["--user-dir", "examples/pointer_generator/pointer_generator_src"],
)
generate_main(
data_dir,
extra_flags=["--user-dir", "examples/pointer_generator/pointer_generator_src"],
)
def test_lightconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lightconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lightconv_iwslt_de_en",
[
"--encoder-conv-type",
"lightweight",
"--decoder-conv-type",
"lightweight",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_dynamicconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"lightconv_iwslt_de_en",
[
"--encoder-conv-type",
"dynamic",
"--decoder-conv-type",
"dynamic",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(data_dir)
def test_cmlm_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"cmlm_transformer",
[
"--apply-bert-init",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--pred-length-offset",
"--length-loss-factor",
"0.1",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_nonautoregressive_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"nonautoregressive_transformer",
[
"--apply-bert-init",
"--src-embedding-copy",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--pred-length-offset",
"--length-loss-factor",
"0.1",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"0",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
# def test_nat_crf_transformer(self):
# with contextlib.redirect_stdout(StringIO()):
# with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir:
# create_dummy_data(data_dir)
# preprocess_translation_data(data_dir, ['--joined-dictionary'])
# train_translation_model(data_dir, 'nacrf_transformer', [
# '--apply-bert-init', '--criterion',
# 'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
# '--length-loss-factor', '0.1',
# '--word-ins-loss-factor', '0.5',
# '--crf-lowrank-approx', '1',
# '--crf-beam-approx', '1'
# ], task='translation_lev')
# generate_main(data_dir, [
# '--task', 'translation_lev',
# '--iter-decode-max-iter', '0',
# '--iter-decode-eos-penalty', '0',
# '--print-step',
# ])
def test_iterative_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_iterative_nonautoregressive_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"iterative_nonautoregressive_transformer",
[
"--apply-bert-init",
"--src-embedding-copy",
"--criterion",
"nat_loss",
"--noise",
"full_mask",
"--stochastic-approx",
"--dae-ratio",
"0.5",
"--train-step",
"3",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_insertion_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"insertion_transformer",
[
"--apply-bert-init",
"--criterion",
"nat_loss",
"--noise",
"random_mask",
],
task="translation_lev",
)
generate_main(
data_dir,
[
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
],
)
def test_mixture_of_experts(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_moe") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--task",
"translation_moe",
"--user-dir",
"examples/translation_moe/translation_moe_src",
"--method",
"hMoElp",
"--mean-pool-gating-network",
"--num-experts",
"3",
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
],
)
generate_main(
data_dir,
[
"--task",
"translation_moe",
"--user-dir",
"examples/translation_moe/translation_moe_src",
"--method",
"hMoElp",
"--mean-pool-gating-network",
"--num-experts",
"3",
"--gen-expert",
"0",
],
)
def test_alignment(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_alignment") as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ["--align-suffix", "align"])
train_translation_model(
data_dir,
"transformer_align",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--load-alignments",
"--alignment-layer",
"1",
"--criterion",
"label_smoothed_cross_entropy_with_alignment",
],
run_validation=True,
)
generate_main(data_dir)
def test_alignment_full_context(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_alignment") as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ["--align-suffix", "align"])
train_translation_model(
data_dir,
"transformer_align",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--load-alignments",
"--alignment-layer",
"1",
"--criterion",
"label_smoothed_cross_entropy_with_alignment",
"--full-context-alignment",
],
run_validation=True,
)
generate_main(data_dir)
def test_transformer_layerdrop(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"3",
"--decoder-layers",
"3",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--encoder-layerdrop",
"0.01",
"--decoder-layerdrop",
"0.01",
],
)
generate_main(data_dir)
generate_main(
data_dir,
[
"--model-overrides",
"{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}"
],
)
class TestStories(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_self_att_wp(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
config = [
"--encoder-layers",
"[(128, 3)] * 2",
"--decoder-layers",
"[(128, 3)] * 2",
"--decoder-attention",
"True",
"--encoder-attention",
"False",
"--gated-attention",
"True",
"--self-attention",
"True",
"--project-input",
"True",
"--encoder-embed-dim",
"8",
"--decoder-embed-dim",
"8",
"--decoder-out-embed-dim",
"8",
"--multihead-self-attention-nheads",
"2",
]
train_translation_model(data_dir, "fconv_self_att_wp", config)
generate_main(data_dir)
# fusion model
os.rename(
os.path.join(data_dir, "checkpoint_last.pt"),
os.path.join(data_dir, "pretrained.pt"),
)
config.extend(
[
"--pretrained",
"True",
"--pretrained-checkpoint",
os.path.join(data_dir, "pretrained.pt"),
"--save-dir",
os.path.join(data_dir, "fusion_model"),
]
)
train_translation_model(data_dir, "fconv_self_att_wp", config)
class TestLanguageModeling(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"fconv_lm",
[
"--decoder-layers",
"[(850, 3)] * 2 + [(1024,4)]",
"--decoder-embed-dim",
"280",
"--optimizer",
"nag",
"--lr",
"0.1",
],
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_transformer_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
["--add-bos-token"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_transformer_lm_with_adaptive_softmax(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_lm_with_adaptive_softmax") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"transformer_lm",
[
"--add-bos-token",
"--criterion",
"adaptive_loss",
"--adaptive-softmax-cutoff",
"5,10,15",
],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lightconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lightconv_lm",
["--add-bos-token"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lstm_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lstm_lm",
["--add-bos-token"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
def test_lstm_lm_residuals(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir,
"lstm_lm",
["--add-bos-token", "--residuals"],
run_validation=True,
)
eval_lm_main(data_dir)
generate_main(
data_dir,
[
"--task",
"language_modeling",
"--sample-break-mode",
"eos",
"--tokens-per-sample",
"500",
],
)
@unittest.skipIf(not has_hf_transformers, "skip test if transformers is missing")
def test_transformer_xl_bptt_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
task_flags = [
"--user-dir",
"examples/truncated_bptt",
"--task",
"truncated_bptt_lm",
"--batch-size",
"2",
"--tokens-per-sample",
"50",
]
train_language_model(
data_dir=data_dir,
arch="transformer_xl",
extra_flags=task_flags + [
"--n-layer",
"2",
],
task="truncated_bptt_lm",
run_validation=True,
extra_valid_flags=task_flags,
)
eval_lm_main(data_dir, extra_flags=task_flags)
class TestMaskedLanguageModel(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_legacy_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(data_dir, "masked_lm")
def test_roberta_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_masked_lm(
data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"]
)
def test_roberta_sentence_prediction(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(data_dir, "roberta_base", num_classes=num_classes)
def test_roberta_regression_single(self):
num_classes = 1
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_roberta_regression_single"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=["--regression-target"],
)
def test_roberta_regression_multiple(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_roberta_regression_multiple"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=["--regression-target"],
)
def test_linformer_roberta_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_masked_lm(
data_dir,
"linformer_roberta_base",
extra_flags=[
"--user-dir",
"examples/linformer/linformer_src",
"--encoder-layers",
"2",
],
)
def test_linformer_roberta_sentence_prediction(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=["--user-dir", "examples/linformer/linformer_src"],
)
def test_linformer_roberta_regression_single(self):
num_classes = 1
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_linformer_roberta_regression_single"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=[
"--regression-target",
"--user-dir",
"examples/linformer/linformer_src",
],
)
def test_linformer_roberta_regression_multiple(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_linformer_roberta_regression_multiple"
) as data_dir:
create_dummy_roberta_head_data(
data_dir, num_classes=num_classes, regression=True
)
preprocess_lm_data(os.path.join(data_dir, "input0"))
train_roberta_head(
data_dir,
"linformer_roberta_base",
num_classes=num_classes,
extra_flags=[
"--regression-target",
"--user-dir",
"examples/linformer/linformer_src",
],
)
def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(
data_dir,
arch="masked_lm",
extra_args=("--encoder-learned-pos",) if learned_pos_emb else (),
)
with tempfile.TemporaryDirectory(
"test_mlm_translation"
) as translation_dir:
create_dummy_data(translation_dir)
preprocess_translation_data(
translation_dir, extra_flags=["--joined-dictionary"]
)
# Train transformer with data_dir/checkpoint_last.pt
train_translation_model(
translation_dir,
arch="transformer_from_pretrained_xlm",
extra_flags=[
"--decoder-layers",
"1",
"--decoder-embed-dim",
"32",
"--decoder-attention-heads",
"1",
"--decoder-ffn-embed-dim",
"32",
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
"--pretrained-xlm-checkpoint",
"{}/checkpoint_last.pt".format(data_dir),
"--activation-fn",
"gelu",
"--max-source-positions",
"500",
"--max-target-positions",
"500",
]
+ (
["--encoder-learned-pos", "--decoder-learned-pos"]
if learned_pos_emb
else []
)
+ (["--init-encoder-only"] if encoder_only else []),
task="translation_from_pretrained_xlm",
)
def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(True, False)
def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(False, False)
def test_pretrained_masked_lm_for_translation_encoder_only(self):
self._test_pretrained_masked_lm_for_translation(True, True)
def test_r4f_roberta(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, "input0"))
preprocess_lm_data(os.path.join(data_dir, "label"))
train_roberta_head(
data_dir,
"roberta_base",
num_classes=num_classes,
extra_flags=[
"--user-dir",
"examples/rxf/rxf_src",
"--criterion",
"sentence_prediction_r3f",
"--spectral-norm-classification-head",
],
)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
train_parser = options.get_training_parser()
# TODO: langs should be in and out right?
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"cross_lingual_lm",
data_dir,
"--arch",
arch,
# Optimizer args
"--optimizer",
"adam",
"--lr-scheduler",
"reduce_lr_on_plateau",
"--lr-shrink",
"0.5",
"--lr",
"0.0001",
"--stop-min-lr",
"1e-09",
# dropout, attention args
"--dropout",
"0.1",
"--attention-dropout",
"0.1",
# MLM args
"--criterion",
"legacy_masked_lm_loss",
"--masked-lm-only",
"--monolingual-langs",
"in,out",
"--num-segment",
"5",
# Transformer args: use a small transformer model for fast training
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
# Other training args
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--dataset-impl",
"raw",
"--num-workers",
"0",
]
+ list(extra_args),
)
train.main(train_args)
class TestOptimizers(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_optimizers(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_optimizers") as data_dir:
                # Use just a bit of data and a tiny model to keep this test's runtime reasonable
create_dummy_data(data_dir, num_examples=10, maxlen=5)
preprocess_translation_data(data_dir)
optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"]
last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt")
for optimizer in optimizers:
if os.path.exists(last_checkpoint):
os.remove(last_checkpoint)
train_translation_model(
data_dir,
"lstm",
[
"--required-batch-size-multiple",
"1",
"--encoder-layers",
"1",
"--encoder-hidden-size",
"32",
"--decoder-layers",
"1",
"--optimizer",
optimizer,
],
)
generate_main(data_dir)
def create_dummy_roberta_head_data(
data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False
):
input_dir = "input0"
def _create_dummy_data(filename):
random_data = torch.rand(num_examples * maxlen)
input_data = 97 + torch.floor(26 * random_data).int()
if regression:
output_data = torch.rand((num_examples, num_classes))
else:
output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int()
with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in:
label_filename = filename + ".label" if regression else filename + ".out"
with open(os.path.join(data_dir, "label", label_filename), "w") as f_out:
offset = 0
for i in range(num_examples):
# write example input
ex_len = random.randint(1, maxlen)
ex_str = " ".join(map(chr, input_data[offset : offset + ex_len]))
print(ex_str, file=f_in)
# write example label
if regression:
class_str = " ".join(map(str, output_data[i].numpy()))
print(class_str, file=f_out)
else:
class_str = "class{}".format(output_data[i])
print(class_str, file=f_out)
offset += ex_len
os.mkdir(os.path.join(data_dir, input_dir))
os.mkdir(os.path.join(data_dir, "label"))
_create_dummy_data("train")
_create_dummy_data("valid")
_create_dummy_data("test")
def train_masked_lm(data_dir, arch, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"masked_lm",
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"masked_lm",
"--batch-size",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"sentence_prediction",
data_dir,
"--arch",
arch,
"--encoder-layers",
"2",
"--num-classes",
str(num_classes),
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"sentence_prediction",
"--max-tokens",
"500",
"--max-positions",
"500",
"--batch-size",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
def train_language_model(
data_dir, arch, extra_flags=None, run_validation=False, extra_valid_flags=None, task="language_modeling"
):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
task,
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
"--task",
task,
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--valid-subset",
"valid",
"--max-tokens",
"500",
"--no-progress-bar",
"--num-workers",
"0",
]
+ (extra_valid_flags or []),
)
validate.main(validate_args)
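# Note: train_masked_lm, train_roberta_head and train_language_model above all follow the
# same pattern as train_translation_model in tests/utils.py: parse a deliberately tiny,
# single-epoch, CPU-friendly configuration, run train.main on it, and optionally run
# validate.main against the resulting checkpoint_last.pt.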
def eval_lm_main(data_dir, extra_flags=None):
eval_lm_parser = options.get_eval_lm_parser()
eval_lm_args = options.parse_args_and_arch(
eval_lm_parser,
[
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--no-progress-bar",
"--num-workers",
"0",
] + (extra_flags or []),
)
eval_lm.main(eval_lm_args)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_binaries.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import unittest
import torch
from fairseq.token_generation_constraints import *
def tensorize(constraints: List[List[int]]) -> List[torch.Tensor]:
    return [torch.tensor(x) for x in constraints]
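# tensorize() only wraps each constraint (a list of token ids) in a 1-D tensor, e.g.
# tensorize([[1, 2], [3, 4]]) -> [tensor([1, 2]), tensor([3, 4])]; pack_constraints() is
# what flattens such lists into the padded batch tensors checked in test_packing below.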
class TestHelperRoutines(unittest.TestCase):
def setUp(self):
self.examples = [
([[]], torch.tensor([[0]])),
([[], []], torch.tensor([[0], [0]])),
([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])),
(
[
[
torch.tensor([3, 1, 2]),
torch.tensor([3]),
torch.tensor([4, 5, 6, 7]),
],
[],
[torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
],
torch.tensor(
[
[3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0],
]
),
),
]
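        # Packed layout (inferred from the expected tensors above): each row starts with
        # the number of constraints for that sentence, followed by each constraint's
        # tokens with a trailing 0, right-padded with zeros to the widest row in the batch.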
def test_packing(self):
"""Ensures the list of lists of tensors gets packed correctly."""
for batch_constraints, expected_tensor in self.examples:
packed = pack_constraints(batch_constraints)
assert torch.equal(packed, expected_tensor)
class TestUnorderedConstraintState(unittest.TestCase):
def setUp(self):
        # Tuples of (constraint set, expected printed graph, token counts per node)
self.examples = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
"([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))",
{1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1},
),
([], "[None].False#0", {}),
(tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}),
(
tensorize([[100000, 1, 2, 3, 4, 5]]),
"([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))",
{100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
),
(
tensorize([[1, 2], [1, 2]]),
"([None].False#2 ([1].False#2 [2].True#2))",
{1: 2, 2: 2},
),
(
tensorize([[1, 2], [3, 4]]),
"([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))",
{1: 1, 2: 1, 3: 1, 4: 1},
),
]
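        # Reading the expected strings above: print_graph renders every node as
        # [token].<ends-a-constraint>#<number of constraints passing through the node>,
        # and the per-token dict in each tuple aggregates those counts by token.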
self.sequences = [
(
self.examples[0][0],
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 94],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": False},
),
(
self.examples[0][0],
[1, 3, 999, 1, 4, 999],
{"bank": 4, "num_completed": 2, "finished": False, "is_root": True},
),
(
self.examples[0][0],
[4, 5, 6, 8],
{"bank": 2, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[0][0],
                # Tricky, because the last three tokens (1, 4, 5) go down the [1->4]
                # branch, so the state could miss crediting [1] and [4->5]
# [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]],
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
self.examples[0][0],
[1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": True},
),
(
tensorize([[1], [2, 3]]),
# Should not be able to get credit for entering 1 a second time
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
),
(
self.examples[4][0],
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
self.examples[4][0],
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
(
self.examples[5][0],
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
),
]
def test_graphs(self):
"""
Test whether unordered graph systems are created correctly.
"""
for example in self.examples:
constraints, expected, gold_counts = example
c = ConstraintNode.create(constraints)
assert (
ConstraintNode.print_graph(c) == expected
), f"got {ConstraintNode.print_graph(c)}, expected {expected}"
assert (
c.token_counts() == gold_counts
), f"{c} got {c.token_counts()} wanted {gold_counts}"
def test_next_tokens(self):
"""
Tests that the set of next tokens is correct.
"""
for example in self.examples:
constraints, expected, gold_counts = example
root = ConstraintNode.create(constraints)
root_tokens = set(root.children.keys())
for sequence in constraints:
state = UnorderedConstraintState(root)
for token in sequence:
all_tokens = root_tokens.union(state.node.children.keys())
assert (
all_tokens == state.next_tokens()
), f"ALL {all_tokens} NEXT {state.next_tokens()}"
state = state.advance(token)
def test_sequences(self):
for constraints, tokens, expected in self.sequences:
state = UnorderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
class TestOrderedConstraintState(unittest.TestCase):
def setUp(self):
self.sequences = [
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2],
{"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 94],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 3, 999, 1, 4],
{"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 999, 999],
{"bank": 3, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 77, 1, 3, 1],
{"bank": 6, "num_completed": 2, "finished": False, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
[1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
{"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
),
(
tensorize([[1], [2, 3]]),
[1, 1],
{"bank": 1, "num_completed": 1, "finished": False, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [1, 2]]),
[1, 2, 1, 2, 1],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
(
tensorize([[1, 2], [3, 4]]),
[1, 2, 3, 4, 5],
{"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
),
]
def test_sequences(self):
for i, (constraints, tokens, expected) in enumerate(self.sequences):
state = OrderedConstraintState.create(pack_constraints([constraints])[0])
for token in tokens:
state = state.advance(token)
result = {}
for attr in expected.keys():
result[attr] = getattr(state, attr)
assert (
result == expected
), f"TEST({tokens}) GOT: {result} WANTED: {expected}"
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_constraints.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import tests.utils as test_utils
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
)
class TestLabelSmoothing(unittest.TestCase):
def setUp(self):
# build dictionary
self.d = test_utils.dummy_dictionary(3)
vocab = len(self.d)
self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens
self.assertEqual(self.d.pad(), 1)
self.assertEqual(self.d.eos(), 2)
self.assertEqual(self.d.unk(), 3)
pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841
# build dataset
self.data = [
# the first batch item has padding
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, eos]),
},
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, w1, eos]),
},
]
self.sample = next(test_utils.dummy_dataloader(self.data))
# build model
self.args = argparse.Namespace()
self.args.sentence_avg = False
self.args.report_accuracy = False
self.args.probs = (
torch.FloatTensor(
[
# pad eos unk w1 w2 w3
[0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
[0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
[0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
]
)
.unsqueeze(0)
.expand(2, 3, 7)
) # add batch dimension
self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
self.model = self.task.build_model(self.args)
def test_nll_loss(self):
self.args.label_smoothing = 0.1
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6)
self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6)
def test_padding(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample)
def get_one_no_padding(idx):
# create a new sample with just a single batch item so that there's
# no padding
sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
args1 = copy.copy(self.args)
args1.probs = args1.probs[idx, :, :].unsqueeze(0)
model1 = self.task.build_model(args1)
loss1, _, _ = crit(model1, sample1)
return loss1
loss1 = get_one_no_padding(0)
loss2 = get_one_no_padding(1)
self.assertAlmostEqual(loss, loss1 + loss2)
def test_reduction(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample, reduce=True)
unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
self.assertAlmostEqual(loss, unreduced_loss.sum())
def test_zero_eps(self):
self.args.label_smoothing = 0.0
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertAlmostEqual(nll_loss, smooth_loss)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_label_smoothing.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import random
import sys
from io import StringIO
import torch
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.tasks import LegacyFairseqTask
from fairseq_cli import generate, interactive, preprocess, train, validate
def dummy_dictionary(vocab_size, prefix="token_"):
d = Dictionary()
for i in range(vocab_size):
token = prefix + str(i)
d.add_symbol(token)
d.finalize(padding_factor=1) # don't add extra padding symbols
return d
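# Note: a fresh Dictionary already contains the special symbols, so the dictionary built
# above has len(d) == vocab_size + 4, with d.pad() == 1, d.eos() == 2 and d.unk() == 3
# (this is exactly what TestLabelSmoothing.setUp asserts). A minimal sketch:
#   d = dummy_dictionary(3)   # adds token_0 .. token_2
#   assert len(d) == 7        # 4 special symbols + 3 tokens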
def dummy_dataloader(
samples,
padding_idx=1,
eos_idx=2,
batch_size=None,
):
if batch_size is None:
batch_size = len(samples)
# add any missing data to samples
for i, sample in enumerate(samples):
if "id" not in sample:
sample["id"] = i
# create dataloader
dataset = TestDataset(samples)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)),
)
return iter(dataloader)
def sequence_generator_setup():
# construct dummy dictionary
d = dummy_dictionary(vocab_size=2)
eos = d.eos()
w1 = 4
w2 = 5
# construct source data
src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0)
[0.0, unk, 0.9, 0.1], # w2: 0.1
# sentence 2:
[0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25)
[0.00, unk, 0.10, 0.9], # w2: 0.3
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9
[
0.6,
unk,
0.2,
0.2,
], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6)
# sentence 2:
[
0.60,
unk,
0.4,
0.00,
], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6)
[0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9
]
),
# step 3:
torch.FloatTensor(
[
# eos w1 w2 prefix
# sentence 1:
[
1.0,
unk,
0.0,
0.0,
], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0)
[
1.0,
unk,
0.0,
0.0,
], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0)
# sentence 2:
[
0.1,
unk,
0.5,
0.4,
], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1)
[
1.0,
unk,
0.0,
0.0,
], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0)
]
),
]
task = TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
tgt_dict = task.target_dictionary
return tgt_dict, w1, w2, src_tokens, src_lengths, model
def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False):
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(data_dir, filename), "w") as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = " ".join(map(chr, data[offset : offset + ex_len]))
print(ex_str, file=h)
offset += ex_len
def _create_dummy_alignment_data(filename_src, filename_tgt, filename):
with open(os.path.join(data_dir, filename_src), "r") as src_f, open(
os.path.join(data_dir, filename_tgt), "r"
) as tgt_f, open(os.path.join(data_dir, filename), "w") as h:
for src, tgt in zip(src_f, tgt_f):
src_len = len(src.split())
tgt_len = len(tgt.split())
avg_len = (src_len + tgt_len) // 2
num_alignments = random.randint(avg_len // 2, 2 * avg_len)
src_indices = torch.floor(torch.rand(num_alignments) * src_len).int()
tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int()
ex_str = " ".join(
[
"{}-{}".format(src, tgt)
for src, tgt in zip(src_indices, tgt_indices)
]
)
print(ex_str, file=h)
_create_dummy_data("train.in")
_create_dummy_data("train.out")
_create_dummy_data("valid.in")
_create_dummy_data("valid.out")
_create_dummy_data("test.in")
_create_dummy_data("test.out")
if alignment:
_create_dummy_alignment_data("train.in", "train.out", "train.align")
_create_dummy_alignment_data("valid.in", "valid.out", "valid.align")
_create_dummy_alignment_data("test.in", "test.out", "test.align")
def preprocess_lm_data(data_dir):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args(
[
"--only-source",
"--trainpref",
os.path.join(data_dir, "train.out"),
"--validpref",
os.path.join(data_dir, "valid.out"),
"--testpref",
os.path.join(data_dir, "test.out"),
"--destdir",
data_dir,
]
)
preprocess.main(preprocess_args)
def preprocess_translation_data(data_dir, extra_flags=None):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args(
[
"--source-lang",
"in",
"--target-lang",
"out",
"--trainpref",
os.path.join(data_dir, "train"),
"--validpref",
os.path.join(data_dir, "valid"),
"--testpref",
os.path.join(data_dir, "test"),
"--thresholdtgt",
"0",
"--thresholdsrc",
"0",
"--destdir",
data_dir,
]
+ (extra_flags or []),
)
preprocess.main(preprocess_args)
def preprocess_summarization_data(data_dir, extra_flags=None):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args(
[
"--source-lang",
"in",
"--target-lang",
"out",
"--trainpref",
os.path.join(data_dir, "train"),
"--validpref",
os.path.join(data_dir, "valid"),
"--testpref",
os.path.join(data_dir, "test"),
"--thresholdtgt",
"0",
"--thresholdsrc",
"0",
"--joined-dictionary",
"--destdir",
data_dir,
]
+ (extra_flags or []),
)
preprocess.main(preprocess_args)
def train_translation_model(
data_dir,
arch,
extra_flags=None,
task="translation",
run_validation=False,
lang_flags=None,
extra_valid_flags=None,
):
if lang_flags is None:
lang_flags = [
"--source-lang",
"in",
"--target-lang",
"out",
]
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
task,
data_dir,
"--save-dir",
data_dir,
"--arch",
arch,
"--optimizer",
"nag",
"--lr",
"0.05",
"--max-tokens",
"500",
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--num-workers",
"0",
]
+ lang_flags
+ (extra_flags or []),
)
train.main(train_args)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
"--task",
task,
data_dir,
"--path",
os.path.join(data_dir, "checkpoint_last.pt"),
"--valid-subset",
"valid",
"--max-tokens",
"500",
"--no-progress-bar",
"--num-workers",
"0",
]
+ lang_flags
+ (extra_valid_flags or []),
)
validate.main(validate_args)
def generate_main(data_dir, extra_flags=None, path=None):
if extra_flags is None:
extra_flags = [
"--print-alignment",
]
if path is None:
path = os.path.join(data_dir, "checkpoint_last.pt")
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(
generate_parser,
[
data_dir,
"--path",
path,
"--beam",
"3",
"--batch-size",
"64",
"--max-len-b",
"5",
"--gen-subset",
"valid",
"--no-progress-bar",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
# evaluate model in batch mode
generate.main(generate_args)
# evaluate model interactively
generate_args.buffer_size = 0
generate_args.input = "-"
generate_args.batch_size = None
orig_stdin = sys.stdin
sys.stdin = StringIO("h e l l o\n")
interactive.main(generate_args)
sys.stdin = orig_stdin
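# generate_main exercises both entry points: batch generation through fairseq_cli.generate,
# then interactive.main with sys.stdin temporarily swapped for an in-memory "h e l l o"
# buffer, which is why buffer_size, input and batch_size are reset right before the call.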
class TestDataset(torch.utils.data.Dataset):
def __init__(self, data):
super().__init__()
self.data = data
self.sizes = None
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class TestTranslationTask(LegacyFairseqTask):
def __init__(self, args, src_dict, tgt_dict, model):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.model = model
@classmethod
def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
return cls(args, src_dict, tgt_dict, model)
def build_model(self, args):
return TestModel.build_model(args, self)
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.tgt_dict
class TestModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
return EncoderOut(
encoder_out=src_tokens,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestIncrementalDecoder(FairseqIncrementalDecoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
assert hasattr(args, "beam_probs") or hasattr(args, "probs")
args.max_decoder_positions = getattr(args, "max_decoder_positions", 100)
self.args = args
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bbsz = prev_output_tokens.size(0)
vocab = len(self.dictionary)
src_len = encoder_out.encoder_out.size(1)
tgt_len = prev_output_tokens.size(1)
# determine number of steps
if incremental_state is not None:
# cache step number
step = utils.get_incremental_state(self, incremental_state, "step")
if step is None:
step = 0
utils.set_incremental_state(self, incremental_state, "step", step + 1)
steps = [step]
else:
steps = list(range(tgt_len))
# define output in terms of raw probs
if hasattr(self.args, "probs"):
assert (
self.args.probs.dim() == 3
), "expected probs to have size bsz*steps*vocab"
probs = self.args.probs.index_select(1, torch.LongTensor(steps))
else:
probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
for i, step in enumerate(steps):
# args.beam_probs gives the probability for every vocab element,
# starting with eos, then unknown, and then the rest of the vocab
if step < len(self.args.beam_probs):
probs[:, i, self.dictionary.eos() :] = self.args.beam_probs[step]
else:
probs[:, i, self.dictionary.eos()] = 1.0
# random attention
attn = torch.rand(bbsz, tgt_len, src_len)
dev = prev_output_tokens.device
return probs.to(dev), {"attn": [attn.to(dev)]}
def get_normalized_probs(self, net_output, log_probs, _):
# the decoder returns probabilities directly
probs = net_output[0]
if log_probs:
return probs.log()
else:
return probs
def max_positions(self):
return self.args.max_decoder_positions
class TestReshapingEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
b_sz, t_sz = src_tokens.shape
padding_needed = t_sz % 2
x = src_tokens
if padding_needed > 0:
padding_needed = 2 - padding_needed
x = F.pad(x, (0, padding_needed))
return EncoderOut(
encoder_out=x.view(b_sz, -1, 2),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestReshapingModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestReshapingEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestAdditionalInputEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
assert "fancy_other_input" in kwargs
assert kwargs["fancy_other_input"] is not None
return EncoderOut(
encoder_out=src_tokens,
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
def reorder_encoder_out(self, encoder_out, new_order):
return EncoderOut(
encoder_out=encoder_out.encoder_out.index_select(0, new_order),
encoder_padding_mask=None,
encoder_embedding=None,
encoder_states=None,
src_tokens=None,
src_lengths=None,
)
class TestAdditionalInputModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestAdditionalInputEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
| data2vec_vision-main | deltalm/src/tests/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torch.nn as nn
from fairseq.modules import ConvTBC
class TestConvTBC(unittest.TestCase):
def test_convtbc(self):
# ksz, in_channels, out_channels
conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
# out_channels, in_channels, ksz
conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
conv_tbc.bias.data.copy_(conv1d.bias.data)
input_tbc = torch.randn(7, 2, 4, requires_grad=True)
input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
input1d.requires_grad = True
output_tbc = conv_tbc(input_tbc)
output1d = conv1d(input1d)
self.assertAlmostEqual(
output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data
)
grad_tbc = torch.randn(output_tbc.size())
grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
output_tbc.backward(grad_tbc)
output1d.backward(grad1d)
self.assertAlmostEqual(
conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data
)
self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
self.assertAlmostEqual(
input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data
)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_convtbc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import unittest
import torch
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScaling(unittest.TestCase):
def setUp(self):
self.x = torch.tensor([2.0]).cuda().half()
weight = 3.0
bias = 5.0
self.error = 1.0
self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half()
self.loss_fn = torch.nn.L1Loss()
self.model = torch.nn.Linear(1, 1)
self.model.weight.data = torch.tensor([[weight]])
self.model.bias.data = torch.tensor([bias])
self.model.cuda().half()
self.params = list(self.model.parameters())
self.cfg_dls = OmegaConf.create(
{
"optimization": {
"lr": [0.1],
},
"optimizer": {
"_name": "adam",
"lr": [0.1],
"adam_betas": "(0.9, 0.999)",
"adam_eps": 1e-8,
"weight_decay": 0.0,
},
"common": {
"fp16_init_scale": 1,
"fp16_scale_window": 1,
"fp16_scale_tolerance": 1,
"threshold_loss_scale": 1,
"min_loss_scale": 1e-4,
"tpu": False,
},
}
)
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def run_iter(self, model, params, optimizer):
optimizer.zero_grad()
y = model(self.x)
loss = self.loss_fn(y, self.target)
optimizer.backward(loss)
self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))
grad_norm = optimizer.clip_grad_norm(0)
self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
optimizer.step()
self.assertEqual(
model.weight,
torch.tensor(
[[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(
model.bias,
torch.tensor(
[5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(optimizer.scaler.loss_scale, 2.0)
def test_mixed_precision(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
self.assertTrue(
all(
torch.all(
fp32_params.eq(
torch.tensor(
[3.1000, 5.1000], device="cuda:0", requires_grad=True
)
)
)
for fp32_params in optimizer.fp32_params.values()
)
)
def test_memory_efficient(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_fp16_optimizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import unittest
import torch
from fairseq.optim.adam import FairseqAdam
from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestMemoryEfficientFP16(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_load_state_dict(self):
# define simple FP16 model
model = torch.nn.Linear(5, 5).cuda().half()
params = list(model.parameters())
# initialize memory efficient FP16 optimizer
# with pseudo DictConfigs
optimizer = FairseqAdam(
cfg=OmegaConf.create(
vars(
argparse.Namespace(
adam_betas="(0.9, 0.999)",
adam_eps=1e-8,
weight_decay=0.0,
lr=[0.00001],
)
)
),
params=params,
)
me_optimizer = MemoryEfficientFP16Optimizer(
cfg=OmegaConf.create(
{
"common": vars(
argparse.Namespace(
fp16_init_scale=1,
fp16_scale_window=1,
fp16_scale_tolerance=1,
threshold_loss_scale=1,
min_loss_scale=1e-4,
)
)
}
),
params=params,
optimizer=optimizer,
)
# optimizer state is created in the first step
loss = model(torch.rand(5).cuda().half()).sum()
me_optimizer.backward(loss)
me_optimizer.step()
# reload state
state = me_optimizer.state_dict()
me_optimizer.load_state_dict(state)
for k, v in me_optimizer.optimizer.state.items():
self.assertTrue(k.dtype == torch.float16)
for v_i in v.values():
if torch.is_tensor(v_i):
self.assertTrue(v_i.dtype == torch.float32)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_memory_efficient_fp16.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import tests.utils as test_utils
import torch
from fairseq.data import TokenBlockDataset
class TestTokenBlockDataset(unittest.TestCase):
def _build_dataset(self, data, **kwargs):
sizes = [len(x) for x in data]
underlying_ds = test_utils.TestDataset(data)
return TokenBlockDataset(underlying_ds, sizes, **kwargs)
def test_eos_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [1])
self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
self.assertEqual(ds[2].tolist(), [1])
def test_block_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none")
self.assertEqual(ds[0].tolist(), [5, 4, 3])
self.assertEqual(ds[1].tolist(), [2, 1, 8])
self.assertEqual(ds[2].tolist(), [7, 6, 1])
self.assertEqual(ds[3].tolist(), [9, 1])
def test_complete_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=6, pad=0, eos=1, break_mode="complete"
)
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
data = [
torch.tensor([4, 3, 2, 1], dtype=torch.long),
torch.tensor([5, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([6, 1], dtype=torch.long),
]
ds = self._build_dataset(
data, block_size=3, pad=0, eos=1, break_mode="complete"
)
self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [5, 1, 1])
self.assertEqual(ds[2].tolist(), [6, 1])
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_token_block_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import tests.utils as test_utils
import torch
from fairseq import search
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.sequence_generator import EnsembleModel, SequenceGenerator
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), 1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
    To build a fairseq model, we need a dummy parser and task. This function
    creates them to facilitate model/criterion tests.
    Note: DummyTask is used as the placeholder task here; you may want to use
    another task by providing a different factory function.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
class TestJitSequenceGeneratorBase(unittest.TestCase):
def setUp(self):
self.task, self.parser = get_dummy_task_and_parser()
eos = self.task.tgt_dict.eos()
src_tokens = torch.randint(3, 50, (2, 10)).long()
src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1)
src_lengths = torch.LongTensor([2, 10])
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
TransformerModel.add_args(self.parser)
args = self.parser.parse_args([])
args.encoder_layers = 2
args.decoder_layers = 1
self.transformer_model = TransformerModel.build_model(args, self.task)
def assertOutputEqual(self, hypo, pos_probs):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores)
self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel())
def assertTensorSizeEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
def assertHypoEqual(self, h1, h2):
"Check two hypos are equal"
self.assertTensorEqual(h1["tokens"], h2["tokens"])
self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"])
self.assertLess(abs(h1["score"] - h2["score"]), 1e-6)
self.assertAlmostEqual(h1["attention"], h2["attention"])
def _test_save_and_load(self, scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase):
@unittest.skipIf(
torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
)
def test_export_transformer(self):
model = self.transformer_model
torch.jit.script(model)
@unittest.skipIf(
torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
)
def test_ensemble_sequence_generator(self):
model = self.transformer_model
generator = SequenceGenerator(
[model], self.task.tgt_dict, beam_size=2, no_repeat_ngram_size=2
)
scripted_model = torch.jit.script(generator)
self._test_save_and_load(scripted_model)
class TestJitEnsemble(TestJitSequenceGeneratorBase):
@unittest.skipIf(
torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
)
def test_export_ensemble_model(self):
model = self.transformer_model
ensemble_models = EnsembleModel([model])
torch.jit.script(ensemble_models)
class TestExportSearch(unittest.TestCase):
def setUp(self):
task, _ = get_dummy_task_and_parser()
self.tgt_dict = task.tgt_dict
self.min_top1_prob = 0.4
def test_export_diverse_bs(self):
search_strategy = search.DiverseBeamSearch(
self.tgt_dict, num_groups=2, diversity_strength=0.0
)
torch.jit.script(search_strategy)
def test_export_sampling(self):
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
torch.jit.script(search_strategy)
def test_export_diverse_siblings_search(self):
search_strategy = search.DiverseSiblingsSearch(
self.tgt_dict, diversity_rate=0.5
)
torch.jit.script(search_strategy)
class TestSequenceGeneratorBase(unittest.TestCase):
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
class TestSequenceGenerator(TestSequenceGeneratorBase):
def setUp(self):
(
self.tgt_dict,
self.w1,
self.w2,
src_tokens,
src_lengths,
self.model,
) = test_utils.sequence_generator_setup()
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
def test_with_normalization(self):
generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])
def test_without_normalization(self):
# Sentence 1: unchanged from the normalized case
# Sentence 2: beams swap order
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, normalize_scores=False
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)
def test_with_lenpen_favoring_short_hypos(self):
lenpen = 0.6
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
def test_with_lenpen_favoring_long_hypos(self):
lenpen = 5.0
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)
def test_maxlen(self):
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, max_len_b=2
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
def test_encoder_with_different_output_len(self):
args = self.model.encoder.args
task = test_utils.TestTranslationTask.setup_task(
args, self.tgt_dict, self.tgt_dict
)
reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
generator = SequenceGenerator(
[reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2
)
hypos = generator.forward(self.sample)
for sent in [0, 1]:
for beam in [0, 1]:
assert hypos[sent][beam]["attention"] is not None
def test_generation_with_additional_input(self):
args = self.model.encoder.args
task = test_utils.TestTranslationTask.setup_task(
args, self.tgt_dict, self.tgt_dict
)
add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task)
generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2)
sample = self.sample.copy()
sample["net_input"]["fancy_other_input"] = sample["net_input"]["src_tokens"]
        hypos = generator.forward(sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
class TestDiverseBeamSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
]
)
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
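        # args.beam_probs drives the dummy model built by
        # test_utils.TestTranslationTask (see tests/utils.py): at decoding
        # step t it emits beam_probs[t] as the output distribution, one row
        # per hypothesis, with columns (eos, unk, w1, w2); unk is pinned to 0.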
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.6, 0.4],
[0.0, unk, 0.6, 0.4],
# sentence 2:
[0.25, unk, 0.35, 0.4],
[0.25, unk, 0.35, 0.4],
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
# sentence 2:
[0.9, unk, 0.1, 0.0],
[0.9, unk, 0.1, 0.0],
]
),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_diverse_beam_search(self):
search_strategy = search.DiverseBeamSearch(
self.tgt_dict, num_groups=2, diversity_strength=0.0
)
generator = SequenceGenerator(
[self.model],
self.tgt_dict,
beam_size=2,
search_strategy=search_strategy,
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
def assertHypoScore(
self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0
):
pos_scores = torch.FloatTensor(pos_probs).log()
pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate)
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def test_diverse_beam_search(self):
search_strategy = search.DiverseSiblingsSearch(
self.tgt_dict, diversity_rate=0.5
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
]
)
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
# The minimal probability of top 2 tokens.
self.min_top2_prob = 0.75
# The minimal probability of the top 1 token.
self.min_top1_prob = 0.4
w1_prob = self.min_top1_prob
w2_prob = self.min_top2_prob - self.min_top1_prob
eos_prob = 1 - self.min_top2_prob
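        # With the values above this gives w1_prob = 0.4, w2_prob = 0.35 and
        # eos_prob = 0.25 at step 1, so a sampling_topp below 0.4 keeps only
        # w1 (deterministic output) while a threshold between 0.4 and 0.75
        # keeps {w1, w2}, which is exactly what the two tests below rely on.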
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
]
),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_topp_sampling_search_low_prob(self):
# Given a prob low enough to top-P sampling, we expect only the top
# 1 token to be sampled, which always results in the same output.
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1 = self.eos, self.w1
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])
def test_topp_sampling_search_high_prob(self):
# Given a prob high enough to top-P sampling, any of the top 2
# tokens could be sampled. This can cause different outputs.
high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=high_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertTrue(
self.hypoTokens(hypos[0][0], [w1, w1, eos])
or self.hypoTokens(hypos[0][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])
)
# sentence 1, beam 2
self.assertTrue(
self.hypoTokens(hypos[0][1], [w1, w1, eos])
or self.hypoTokens(hypos[0][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])
)
# sentence 2, beam 1
self.assertTrue(
self.hypoTokens(hypos[1][0], [w1, w1, eos])
or self.hypoTokens(hypos[1][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])
)
# sentence 2, beam 2
self.assertTrue(
self.hypoTokens(hypos[1][1], [w1, w1, eos])
or self.hypoTokens(hypos[1][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])
)
def hypoTokens(self, hypo, tokens):
return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
if not self.almostEqual(hypo["positional_scores"], pos_scores):
return False
if pos_scores.numel() != hypo["tokens"].numel():
return False
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
return abs(score - hypo["score"]) < 1e-6
def almostEqual(self, t1, t2):
return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4
def tensorEqual(self, t1, t2):
return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_sequence_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict
class TestConcatDataset(unittest.TestCase):
def setUp(self):
d = mock_dict()
tokens_1 = torch.LongTensor([1]).view(1, -1)
tokens_ds1 = TokenBlockDataset(
tokens_1,
sizes=[tokens_1.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_1 = LanguagePairDataset(
tokens_ds1, tokens_ds1.sizes, d, shuffle=False
)
tokens_2 = torch.LongTensor([2]).view(1, -1)
tokens_ds2 = TokenBlockDataset(
tokens_2,
sizes=[tokens_2.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_2 = LanguagePairDataset(
tokens_ds2, tokens_ds2.sizes, d, shuffle=False
)
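    # A quick reminder of the semantics checked below: sample_ratios upsample
    # the corresponding datasets by repetition, so ConcatDataset([d1, d2],
    # sample_ratios=[1, 2]) exposes len(d1) + 2 * len(d2) examples, with d2's
    # items appearing twice.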
def test_concat_dataset_basics(self):
d = ConcatDataset([self.dataset_1, self.dataset_2])
assert len(d) == 2
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 2
d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2])
assert len(d) == 3
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 2
assert d[2]["source"][0] == 2
d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1])
assert len(d) == 3
assert d[0]["source"][0] == 1
assert d[1]["source"][0] == 1
assert d[2]["source"][0] == 2
| data2vec_vision-main | deltalm/src/tests/test_concat_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import sys
import tempfile
import unittest
from typing import Optional
from unittest.mock import MagicMock
class TestFileIO(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile: Optional[str] = None
_tmpfile_contents = "Hello, World"
@classmethod
def setUpClass(cls) -> None:
cls._tmpdir = tempfile.mkdtemp()
with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
cls._tmpfile = f.name
f.write(cls._tmpfile_contents)
f.flush()
@classmethod
def tearDownClass(cls) -> None:
# Cleanup temp working dir.
if cls._tmpdir is not None:
shutil.rmtree(cls._tmpdir) # type: ignore
def test_file_io(self):
from fairseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_oss(self):
# Mock fvcore to simulate oss environment.
sys.modules["fvcore"] = MagicMock()
from fairseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
| data2vec_vision-main | deltalm/src/tests/test_file_io.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import json
import os
import tempfile
import unittest
from io import StringIO
import torch
from . import test_binaries
class TestReproducibility(unittest.TestCase):
def _test_reproducibility(
self,
name,
extra_flags=None,
delta=0.0001,
resume_checkpoint="checkpoint1.pt",
max_epoch=3,
):
def get_last_log_stats_containing_string(log_records, search_string):
            for log_record in log_records[::-1]:
if search_string in log_record.msg:
return json.loads(log_record.msg)
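        # Strategy: train for `max_epoch` epochs in one go, then rename the
        # intermediate checkpoint (`resume_checkpoint`) to checkpoint_last.pt,
        # resume training, and require the final train/valid stats of both
        # runs to agree within `delta`.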
if extra_flags is None:
extra_flags = []
with tempfile.TemporaryDirectory(name) as data_dir:
with self.assertLogs() as logs:
test_binaries.create_dummy_data(data_dir)
test_binaries.preprocess_translation_data(data_dir)
# train epochs 1 and 2 together
with self.assertLogs() as logs:
test_binaries.train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--dropout",
"0.0",
"--log-format",
"json",
"--log-interval",
"1",
"--max-epoch",
str(max_epoch),
]
+ extra_flags,
)
train_log = get_last_log_stats_containing_string(logs.records, "train_loss")
valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss")
# train epoch 2, resuming from previous checkpoint 1
os.rename(
os.path.join(data_dir, resume_checkpoint),
os.path.join(data_dir, "checkpoint_last.pt"),
)
with self.assertLogs() as logs:
test_binaries.train_translation_model(
data_dir,
"fconv_iwslt_de_en",
[
"--dropout",
"0.0",
"--log-format",
"json",
"--log-interval",
"1",
"--max-epoch",
str(max_epoch),
]
+ extra_flags,
)
train_res_log = get_last_log_stats_containing_string(
logs.records, "train_loss"
)
valid_res_log = get_last_log_stats_containing_string(
logs.records, "valid_loss"
)
for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]:
self.assertAlmostEqual(
float(train_log[k]), float(train_res_log[k]), delta=delta
)
for k in [
"valid_loss",
"valid_ppl",
"valid_num_updates",
"valid_best_loss",
]:
self.assertAlmostEqual(
float(valid_log[k]), float(valid_res_log[k]), delta=delta
)
def test_reproducibility(self):
self._test_reproducibility("test_reproducibility")
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_reproducibility_fp16(self):
self._test_reproducibility(
"test_reproducibility_fp16",
[
"--fp16",
"--fp16-init-scale",
"4096",
],
delta=0.011,
)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_reproducibility_memory_efficient_fp16(self):
self._test_reproducibility(
"test_reproducibility_memory_efficient_fp16",
[
"--memory-efficient-fp16",
"--fp16-init-scale",
"4096",
],
)
def test_mid_epoch_reproducibility(self):
self._test_reproducibility(
"test_mid_epoch_reproducibility",
["--save-interval-updates", "3"],
resume_checkpoint="checkpoint_1_3.pt",
max_epoch=1,
)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_reproducibility.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder
class TestCharacterTokenEmbedder(unittest.TestCase):
def test_character_token_embedder(self):
vocab = Dictionary()
vocab.add_symbol("hello")
vocab.add_symbol("there")
embedder = CharacterTokenEmbedder(
vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2
)
test_sents = [["hello", "unk", "there"], ["there"], ["hello", "there"]]
max_len = max(len(s) for s in test_sents)
input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
for i in range(len(test_sents)):
input[i][0] = vocab.eos()
for j in range(len(test_sents[i])):
input[i][j + 1] = vocab.index(test_sents[i][j])
input[i][j + 2] = vocab.eos()
embs = embedder(input)
assert embs.size() == (len(test_sents), max_len + 2, 5)
self.assertAlmostEqual(embs[0][0], embs[1][0])
self.assertAlmostEqual(embs[0][0], embs[0][-1])
self.assertAlmostEqual(embs[0][1], embs[2][1])
self.assertAlmostEqual(embs[0][3], embs[1][1])
embs.sum().backward()
assert embedder.char_embeddings.weight.grad is not None
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_character_token_embedder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import os
import shutil
import tempfile
import unittest
import numpy as np
import torch
from scripts.average_checkpoints import average_checkpoints
from torch import nn
class ModelWithSharedParameter(nn.Module):
def __init__(self):
super(ModelWithSharedParameter, self).__init__()
self.embedding = nn.Embedding(1000, 200)
self.FC1 = nn.Linear(200, 200)
self.FC2 = nn.Linear(200, 200)
# tie weight in FC2 to FC1
self.FC2.weight = nn.Parameter(self.FC1.weight)
self.FC2.bias = nn.Parameter(self.FC1.bias)
self.relu = nn.ReLU()
def forward(self, input):
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)
class TestAverageCheckpoints(unittest.TestCase):
def test_average_checkpoints(self):
params_0 = collections.OrderedDict(
[
("a", torch.DoubleTensor([100.0])),
("b", torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])),
("c", torch.IntTensor([7, 8, 9])),
]
)
params_1 = collections.OrderedDict(
[
("a", torch.DoubleTensor([1.0])),
("b", torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
("c", torch.IntTensor([2, 2, 2])),
]
)
params_avg = collections.OrderedDict(
[
("a", torch.DoubleTensor([50.5])),
("b", torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])),
# We expect truncation for integer division
("c", torch.IntTensor([4, 5, 5])),
]
)
fd_0, path_0 = tempfile.mkstemp()
fd_1, path_1 = tempfile.mkstemp()
torch.save(collections.OrderedDict([("model", params_0)]), path_0)
torch.save(collections.OrderedDict([("model", params_1)]), path_1)
output = average_checkpoints([path_0, path_1])["model"]
os.close(fd_0)
os.remove(path_0)
os.close(fd_1)
os.remove(path_1)
for (k_expected, v_expected), (k_out, v_out) in zip(
params_avg.items(), output.items()
):
self.assertEqual(
k_expected,
k_out,
"Key mismatch - expected {} but found {}. "
"(Expected list of keys: {} vs actual list of keys: {})".format(
k_expected, k_out, params_avg.keys(), output.keys()
),
)
np.testing.assert_allclose(
v_expected.numpy(),
v_out.numpy(),
err_msg="Tensor value mismatch for key {}".format(k_expected),
)
def test_average_checkpoints_with_shared_parameters(self):
def _construct_model_with_shared_parameters(path, value):
m = ModelWithSharedParameter()
nn.init.constant_(m.FC1.weight, value)
torch.save({"model": m.state_dict()}, path)
return m
tmpdir = tempfile.mkdtemp()
paths = []
path = os.path.join(tmpdir, "m1.pt")
m1 = _construct_model_with_shared_parameters(path, 1.0)
paths.append(path)
path = os.path.join(tmpdir, "m2.pt")
m2 = _construct_model_with_shared_parameters(path, 2.0)
paths.append(path)
path = os.path.join(tmpdir, "m3.pt")
m3 = _construct_model_with_shared_parameters(path, 3.0)
paths.append(path)
new_model = average_checkpoints(paths)
self.assertTrue(
torch.equal(
new_model["model"]["embedding.weight"],
(m1.embedding.weight + m2.embedding.weight + m3.embedding.weight) / 3.0,
)
)
self.assertTrue(
torch.equal(
new_model["model"]["FC1.weight"],
(m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0,
)
)
self.assertTrue(
torch.equal(
new_model["model"]["FC2.weight"],
(m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0,
)
)
shutil.rmtree(tmpdir)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_average_checkpoints.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.modules import multihead_attention, sinusoidal_positional_embedding
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), 1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
Return a dummy task and argument parser, which can be used to
create a model/criterion.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
def _test_save_and_load(scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
class TestExportModels(unittest.TestCase):
def test_export_multihead_attention(self):
module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
def test_incremental_state_multihead_attention(self):
module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module1 = torch.jit.script(module1)
module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
module2 = torch.jit.script(module2)
state = {}
state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])})
state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])})
v1 = module1.get_incremental_state(state, "key")["a"]
v2 = module2.get_incremental_state(state, "key")["a"]
self.assertEqual(v1, 1)
self.assertEqual(v2, 2)
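        # The two values stay separate because each MultiheadAttention
        # instance prefixes its incremental-state keys with its own unique
        # module id, so scripted modules never clobber each other's state.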
def test_positional_embedding(self):
module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding(
embedding_dim=8, padding_idx=1
)
scripted = torch.jit.script(module)
_test_save_and_load(scripted)
@unittest.skipIf(
torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
)
def test_export_transformer(self):
task, parser = get_dummy_task_and_parser()
TransformerModel.add_args(parser)
args = parser.parse_args([])
model = TransformerModel.build_model(args, task)
scripted = torch.jit.script(model)
_test_save_and_load(scripted)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/test_export.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from collections import OrderedDict
import numpy as np
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from tests.test_train import mock_dict
class TestMultiCorpusSampledDataset(unittest.TestCase):
def setUp(self):
d = mock_dict()
tokens_1 = torch.LongTensor([1]).view(1, -1)
tokens_ds1 = TokenBlockDataset(
tokens_1,
sizes=[tokens_1.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_1 = LanguagePairDataset(
tokens_ds1, tokens_ds1.sizes, d, shuffle=False
)
tokens_2 = torch.LongTensor([2]).view(1, -1)
tokens_ds2 = TokenBlockDataset(
tokens_2,
sizes=[tokens_2.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_2 = LanguagePairDataset(
tokens_ds2, tokens_ds2.sizes, d, shuffle=False
)
def _test_sample_helper(
self,
expected_sample_from_first_ds_percentage,
num_samples=1000,
sampling_func=None,
):
# To make sure test is not flaky
np.random.seed(0)
if sampling_func is None:
m = MultiCorpusSampledDataset(
OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
)
else:
m = MultiCorpusSampledDataset(
OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
sampling_func=sampling_func,
)
m.ordered_indices()
count_sample_from_first_dataset = 0
for _ in range(num_samples):
if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1:
count_sample_from_first_dataset += 1
sample_from_first_ds_percentage = (
1.0 * count_sample_from_first_dataset / num_samples
)
self.assertLess(
abs(
sample_from_first_ds_percentage
- expected_sample_from_first_ds_percentage
),
0.01,
)
def test_multi_corpus_sampled_dataset_uniform_sample(self):
self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5)
def test_multi_corpus_sampled_dataset_weighted_sample(self):
def naive_weighted_sample(weights):
def f(l):
v = np.random.random()
agg = 0
for i, weight in enumerate(weights):
agg += weight
if agg > v:
return i
return f
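        # naive_weighted_sample is plain inverse-CDF categorical sampling:
        # draw v ~ U(0, 1) and return the first index whose cumulative weight
        # exceeds v, so index 0 is picked with probability weights[0], etc.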
self._test_sample_helper(
expected_sample_from_first_ds_percentage=0.9,
sampling_func=naive_weighted_sample(weights=[0.9, 0.1]),
)
| data2vec_vision-main | deltalm/src/tests/test_multi_corpus_sampled_dataset.py |
data2vec_vision-main | deltalm/src/tests/gpu/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
import tempfile
import unittest
from io import StringIO
import torch
from fairseq import options
from fairseq_cli import train
from tests.utils import (
create_dummy_data,
generate_main,
preprocess_lm_data,
preprocess_translation_data,
train_translation_model,
)
class TestTranslationGPU(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_fp16(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_fp16") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, "fconv_iwslt_de_en", ["--fp16"])
generate_main(data_dir)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_memory_efficient_fp16(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_memory_efficient_fp16") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir, "fconv_iwslt_de_en", ["--memory-efficient-fp16"]
)
generate_main(data_dir)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_transformer_fp16(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_transformer") as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
"transformer_iwslt_de_en",
[
"--encoder-layers",
"2",
"--decoder-layers",
"2",
"--encoder-embed-dim",
"64",
"--decoder-embed-dim",
"64",
"--fp16",
],
run_validation=True,
)
generate_main(data_dir)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_levenshtein_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory(
"test_levenshtein_transformer"
) as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ["--joined-dictionary"])
train_translation_model(
data_dir,
"levenshtein_transformer",
[
"--apply-bert-init",
"--early-exit",
"6,6,6",
"--criterion",
"nat_loss",
],
task="translation_lev",
)
gen_config = [
"--task",
"translation_lev",
"--iter-decode-max-iter",
"9",
"--iter-decode-eos-penalty",
"0",
"--print-step",
]
# non-ensemble generation
generate_main(data_dir, gen_config)
# ensemble generation
generate_main(
data_dir,
gen_config,
path=os.pathsep.join([
os.path.join(data_dir, "checkpoint_last.pt"),
os.path.join(data_dir, "checkpoint_last.pt"),
]),
)
def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"language_modeling",
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"adaptive_loss",
"--adaptive-softmax-cutoff",
"5,10,15",
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
]
+ (extra_flags or []),
)
train.main(train_args)
# try scalar quantization
scalar_quant_train_parser = options.get_training_parser()
scalar_quant_train_args = options.parse_args_and_arch(
scalar_quant_train_parser,
[
"--task",
"language_modeling",
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"adaptive_loss",
"--adaptive-softmax-cutoff",
"5,10,15",
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-update",
"3",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
"--quant-noise-scalar",
"0.5",
]
+ (extra_flags or []),
)
train.main(scalar_quant_train_args)
# try iterative PQ quantization
quantize_parser = options.get_training_parser()
quantize_args = options.parse_args_and_arch(
quantize_parser,
[
"--task",
"language_modeling",
data_dir,
"--arch",
arch,
"--optimizer",
"adam",
"--lr",
"0.0001",
"--criterion",
"adaptive_loss",
"--adaptive-softmax-cutoff",
"5,10,15",
"--max-tokens",
"50",
"--tokens-per-sample",
"50",
"--max-update",
"6",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--ddp-backend",
"no_c10d",
"--num-workers",
"0",
"--restore-file",
os.path.join(data_dir, "checkpoint_last.pt"),
"--reset-optimizer",
"--quantization-config-path",
os.path.join(
os.path.dirname(__file__), "transformer_quantization_config.yaml"
),
]
+ (extra_flags or []),
)
train.main(quantize_args)
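# In short, _quantize_language_model chains three runs on the same data_dir:
# a small baseline LM training run, a scalar-quantization run driven by
# --quant-noise-scalar, and an iterative product-quantization run that
# restores checkpoint_last.pt and reads the PQ layout from
# transformer_quantization_config.yaml next to this test file.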
class TestQuantization(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_quantization(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_quantization") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
# tests both scalar and iterative PQ quantization
_quantize_language_model(data_dir, "transformer_lm")
class TestOptimizersGPU(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
def test_flat_grads(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_flat_grads") as data_dir:
# Use just a bit of data and tiny model to keep this test runtime reasonable
create_dummy_data(data_dir, num_examples=10, maxlen=5)
preprocess_translation_data(data_dir)
with self.assertRaises(RuntimeError):
# adafactor isn't compatible with flat grads, which
# are used by default with --fp16
train_translation_model(
data_dir,
"lstm",
[
"--required-batch-size-multiple",
"1",
"--encoder-layers",
"1",
"--encoder-hidden-size",
"32",
"--decoder-layers",
"1",
"--optimizer",
"adafactor",
"--fp16",
],
)
# but it should pass once we set --fp16-no-flatten-grads
train_translation_model(
data_dir,
"lstm",
[
"--required-batch-size-multiple",
"1",
"--encoder-layers",
"1",
"--encoder-hidden-size",
"32",
"--decoder-layers",
"1",
"--optimizer",
"adafactor",
"--fp16",
"--fp16-no-flatten-grads",
],
)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/gpu/test_binaries_gpu.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import sys
import unittest
import torch
from fairseq import distributed_utils as dist_utils
from .utils import objects_are_equal, spawn_and_init
class TestDistributedUtils(unittest.TestCase):
def setUp(self):
if not torch.cuda.is_available():
raise unittest.SkipTest("CUDA not available, skipping test")
if sys.platform == "win32":
raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
if torch.cuda.device_count() < 2:
raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")
def test_broadcast_object_python(self):
spawn_and_init(
functools.partial(
TestDistributedUtils._test_broadcast_object,
"hello world",
),
world_size=2,
)
def test_broadcast_object_tensor(self):
spawn_and_init(
functools.partial(
TestDistributedUtils._test_broadcast_object,
torch.rand(5),
),
world_size=2,
)
def test_broadcast_object_complex(self):
spawn_and_init(
functools.partial(
TestDistributedUtils._test_broadcast_object,
{
"a": "1",
"b": [2, torch.rand(2, 3), 3],
"c": (torch.rand(2, 3), 4),
"d": {5, torch.rand(5)},
"e": torch.rand(5),
"f": torch.rand(5).int().cuda(),
},
),
world_size=2,
)
@staticmethod
def _test_broadcast_object(ref_obj, rank, group):
obj = dist_utils.broadcast_object(
ref_obj if rank == 0 else None, src_rank=0, group=group
)
assert objects_are_equal(ref_obj, obj)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/distributed/test_distributed_utils.py |
data2vec_vision-main | deltalm/src/tests/distributed/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import tempfile
import torch
def spawn_and_init(fn, world_size, args=None):
if args is None:
args = ()
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
torch.multiprocessing.spawn(
fn=functools.partial(init_and_run, fn, args),
args=(world_size, tmp_file.name,),
nprocs=world_size,
)
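# Rough usage sketch (mirrors tests/distributed/test_distributed_utils.py):
#
#   spawn_and_init(functools.partial(my_check, some_arg), world_size=2)
#
# Each spawned worker initializes NCCL through the shared temp file and then
# runs my_check(some_arg, rank, group); `my_check`/`some_arg` are placeholder
# names for whatever test body you pass in.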
def distributed_init(rank, world_size, tmp_file):
torch.distributed.init_process_group(
backend="nccl",
init_method="file://{}".format(tmp_file),
world_size=world_size,
rank=rank,
)
torch.cuda.set_device(rank)
def init_and_run(fn, args, rank, world_size, tmp_file):
distributed_init(rank, world_size, tmp_file)
group = torch.distributed.new_group()
fn(rank, group, *args)
def objects_are_equal(a, b) -> bool:
if type(a) is not type(b):
return False
if isinstance(a, dict):
if set(a.keys()) != set(b.keys()):
return False
for k in a.keys():
if not objects_are_equal(a[k], b[k]):
return False
return True
elif isinstance(a, (list, tuple, set)):
if len(a) != len(b):
return False
return all(objects_are_equal(x, y) for x, y in zip(a, b))
elif torch.is_tensor(a):
return (
a.size() == b.size()
and a.dtype == b.dtype
and a.device == b.device
and torch.all(a == b)
)
else:
return a == b
| data2vec_vision-main | deltalm/src/tests/distributed/utils.py |
#!/usr/bin/env python3
import argparse
import os
import unittest
from inspect import currentframe, getframeinfo
import numpy as np
import torch
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.data import data_utils as fairseq_data_utils
from fairseq.data.dictionary import Dictionary
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqModel,
)
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
# ///////////////////////////////////////////////////////////////////////////
# utility function to setup dummy dict/task/input
# ///////////////////////////////////////////////////////////////////////////
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), 1000)
return dummy_dict
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.tgt_dict = self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_task_and_parser():
"""
    To build a fairseq model, we need a dummy parser and task. This function
    creates them to facilitate model/criterion tests.
    Note: DummyTask is used as the placeholder task here; you may want to use
    another task by providing a different factory function.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
def get_dummy_input(T=100, D=80, B=5, K=100):
forward_input = {}
# T max sequence length
# D feature vector dimension
# B batch size
# K target dimension size
feature = torch.randn(B, T, D)
# this (B, T, D) layout is just a convention, you can override it by
    # writing your own _prepare_forward_input function
src_lengths = torch.from_numpy(
np.random.randint(low=1, high=T, size=B, dtype=np.int64)
)
src_lengths[0] = T # make sure the maximum length matches
prev_output_tokens = []
for b in range(B):
token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)
prev_output_tokens.append(torch.from_numpy(tokens))
prev_output_tokens = fairseq_data_utils.collate_tokens(
prev_output_tokens,
pad_idx=1,
eos_idx=2,
left_pad=False,
move_eos_to_beginning=False,
)
src_lengths, sorted_order = src_lengths.sort(descending=True)
forward_input["src_tokens"] = feature.index_select(0, sorted_order)
forward_input["src_lengths"] = src_lengths
forward_input["prev_output_tokens"] = prev_output_tokens
return forward_input
def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
"""
This only provides an example to generate dummy encoder output
"""
(T, B, D) = encoder_out_shape
encoder_out = {}
encoder_out["encoder_out"] = torch.from_numpy(
np.random.randn(*encoder_out_shape).astype(np.float32)
)
seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))
# some dummy mask
encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand(
B, -1
) >= seq_lengths.view(B, 1).expand(-1, T)
encoder_out["encoder_padding_mask"].t_()
    # encoder_padding_mask is a (T, B) tensor whose (t, b)-th element indicates
    # whether encoder_out[t, b] is valid (=0) or padding (=1)
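    # Tiny example: with T=4 and seq_lengths=[4, 2] the column for the second
    # sequence is [0, 0, 1, 1] after the transpose, i.e. its last two time
    # steps are treated as padding.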
return encoder_out
def _current_postion_info():
cf = currentframe()
frameinfo = " (at {}:{})".format(
os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
)
return frameinfo
def check_encoder_output(encoder_output, batch_size=None):
"""we expect encoder_output to be a dict with the following
key/value pairs:
- encoder_out: a Torch.Tensor
- encoder_padding_mask: a binary Torch.Tensor
"""
if not isinstance(encoder_output, dict):
msg = (
"FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info()
)
return False, msg
if "encoder_out" not in encoder_output:
msg = (
"FairseqEncoderModel.forward(...) must contain encoder_out"
+ _current_postion_info()
)
return False, msg
if "encoder_padding_mask" not in encoder_output:
msg = (
"FairseqEncoderModel.forward(...) must contain encoder_padding_mask"
+ _current_postion_info()
)
return False, msg
if not isinstance(encoder_output["encoder_out"], torch.Tensor):
msg = "encoder_out must be a torch.Tensor" + _current_postion_info()
return False, msg
if encoder_output["encoder_out"].dtype != torch.float32:
msg = "encoder_out must have float32 dtype" + _current_postion_info()
return False, msg
mask = encoder_output["encoder_padding_mask"]
if mask is not None:
if not isinstance(mask, torch.Tensor):
msg = (
"encoder_padding_mask must be a torch.Tensor" + _current_postion_info()
)
return False, msg
if mask.dtype != torch.uint8 and (
not hasattr(torch, "bool") or mask.dtype != torch.bool
):
msg = (
"encoder_padding_mask must have dtype of uint8"
+ _current_postion_info()
)
return False, msg
if mask.dim() != 2:
msg = (
"we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)"
+ _current_postion_info()
)
return False, msg
if batch_size is not None and mask.size(1) != batch_size:
msg = (
"we expect encoder_padding_mask to be a 2-d tensor, with size(1)"
+ " being the batch size"
+ _current_postion_info()
)
return False, msg
return True, None
def check_decoder_output(decoder_output):
"""we expect output from a decoder is a tuple with the following constraint:
- the first element is a torch.Tensor
- the second element can be anything (reserved for future use)
"""
if not isinstance(decoder_output, tuple):
msg = "FariseqDecoder output must be a tuple" + _current_postion_info()
return False, msg
if len(decoder_output) != 2:
msg = "FairseqDecoder output must be 2-elem tuple" + _current_postion_info()
return False, msg
if not isinstance(decoder_output[0], torch.Tensor):
msg = (
"FariseqDecoder output[0] must be a torch.Tensor" + _current_postion_info()
)
return False, msg
return True, None
# ///////////////////////////////////////////////////////////////////////////
# Base Test class
# ///////////////////////////////////////////////////////////////////////////
class TestBaseFairseqModelBase(unittest.TestCase):
"""
    This class is used to facilitate writing unit tests for any class derived from
`BaseFairseqModel`.
"""
@classmethod
def setUpClass(cls):
if cls is TestBaseFairseqModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model):
self.assertTrue(isinstance(model, BaseFairseqModel))
self.model = model
def setupInput(self):
pass
def setUp(self):
self.model = None
self.forward_input = None
pass
class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
"""
    Base code to test FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`); concrete tests must be derived from this base class.
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderDecoderModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model_cls, extra_args_setters=None):
self.assertTrue(
issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),
msg="This class only tests for FairseqModel subclasses",
)
task, parser = get_dummy_task_and_parser()
model_cls.add_args(parser)
args = parser.parse_args([])
if extra_args_setters is not None:
for args_setter in extra_args_setters:
args_setter(args)
model = model_cls.build_model(args, task)
self.model = model
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
def setUp(self):
super().setUp()
def test_forward(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
# for FairseqEncoderDecoderModel, forward returns a tuple of two
# elements, the first one is a Torch.Tensor
succ, msg = check_decoder_output(forward_output)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
def test_get_normalized_probs(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
prob = self.model.get_normalized_probs(forward_output, log_probs=False)
# in order for different models/criterion to play with each other
# we need to know whether the logprob or prob output is batch_first
# or not. We assume an additional attribute will be attached to logprob
# or prob. If you find your code failed here, simply override
# FairseqModel.get_normalized_probs, see example at
# https://fburl.com/batch_first_example
self.assertTrue(hasattr(logprob, "batch_first"))
self.assertTrue(hasattr(prob, "batch_first"))
self.assertTrue(torch.is_tensor(logprob))
self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
"""
base class to test FairseqEncoderModel
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model_cls, extra_args_setters=None):
self.assertTrue(
issubclass(model_cls, FairseqEncoderModel),
msg="This class is only used for testing FairseqEncoderModel",
)
task, parser = get_dummy_task_and_parser()
model_cls.add_args(parser)
args = parser.parse_args([])
if extra_args_setters is not None:
for args_setter in extra_args_setters:
args_setter(args)
model = model_cls.build_model(args, task)
self.model = model
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
# get_dummy_input() is originally for s2s, here we delete extra dict
# items, so it can be used for EncoderModel / Encoder as well
self.forward_input.pop("prev_output_tokens", None)
def setUp(self):
super().setUp()
def test_forward(self):
if self.forward_input and self.model:
bsz = self.forward_input["src_tokens"].size(0)
forward_output = self.model.forward(**self.forward_input)
# we expect forward_output to be a dict with the following
# key/value pairs:
# - encoder_out: a Torch.Tensor
# - encoder_padding_mask: a binary Torch.Tensor
succ, msg = check_encoder_output(forward_output, batch_size=bsz)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
def test_get_normalized_probs(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
prob = self.model.get_normalized_probs(forward_output, log_probs=False)
# In order for different models/criterions to work with each other,
# we need to know whether the logprob or prob output is batch_first
# or not. We assume an additional attribute will be attached to logprob
# or prob. If your code fails here, simply override
# FairseqModel.get_normalized_probs; see the example at
# https://fburl.com/batch_first_example
self.assertTrue(hasattr(logprob, "batch_first"))
self.assertTrue(hasattr(prob, "batch_first"))
self.assertTrue(torch.is_tensor(logprob))
self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderBase(unittest.TestCase):
"""
base class to test FairseqEncoder
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpEncoder(self, encoder):
self.assertTrue(
isinstance(encoder, FairseqEncoder),
msg="This class is only used for test FairseqEncoder",
)
self.encoder = encoder
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
# get_dummy_input() is originally for s2s, here we delete extra dict
# items, so it can be used for EncoderModel / Encoder as well
self.forward_input.pop("prev_output_tokens", None)
def setUp(self):
self.encoder = None
self.forward_input = None
def test_forward(self):
if self.encoder and self.forward_input:
bsz = self.forward_input["src_tokens"].size(0)
forward_output = self.encoder.forward(**self.forward_input)
succ, msg = check_encoder_output(forward_output, batch_size=bsz)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
class TestFairseqDecoderBase(unittest.TestCase):
"""
base class to test FairseqDecoder
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqDecoderBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpDecoder(self, decoder):
self.assertTrue(
isinstance(decoder, FairseqDecoder),
msg="This class is only used for test FairseqDecoder",
)
self.decoder = decoder
def setUpInput(self, input=None):
self.forward_input = get_dummy_encoder_output() if input is None else input
def setUpPrevOutputTokens(self, tokens=None):
if tokens is None:
self.encoder_input = get_dummy_input()
self.prev_output_tokens = self.encoder_input["prev_output_tokens"]
else:
self.prev_output_tokens = tokens
def setUp(self):
self.decoder = None
self.forward_input = None
self.prev_output_tokens = None
def test_forward(self):
if (
self.decoder is not None
and self.forward_input is not None
and self.prev_output_tokens is not None
):
forward_output = self.decoder.forward(
prev_output_tokens=self.prev_output_tokens,
encoder_out=self.forward_input,
)
succ, msg = check_decoder_output(forward_output)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
class DummyEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@classmethod
def build_model(cls, args, task):
return cls(DummyEncoder())
def get_logits(self, net_output):
# Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as
# F.binary_cross_entropy_with_logits combines sigmoid and CE
return torch.log(
torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"])
)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample)
lprobs.batch_first = True
return lprobs
class DummyEncoder(FairseqEncoder):
def __init__(self):
super().__init__(None)
def forward(self, src_tokens, src_lengths):
mask, max_len = lengths_to_encoder_padding_mask(src_lengths)
return {"encoder_out": src_tokens, "encoder_padding_mask": mask}
class CrossEntropyCriterionTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
if cls is CrossEntropyCriterionTestBase:
raise unittest.SkipTest("Skipping base class test case")
super().setUpClass()
def setUpArgs(self):
args = argparse.Namespace()
args.sentence_avg = False
args.threshold = 0.1 # to use with BinaryCrossEntropyWithLogitsCriterion
return args
def setUp(self):
args = self.setUpArgs()
self.model = DummyEncoderModel(encoder=DummyEncoder())
self.criterion = self.criterion_cls.build_criterion(args, task=DummyTask(args))
def get_src_tokens(self, correct_prediction, aggregate):
"""
correct_prediction: True if the net_output (src_tokens) should
predict the correct target
aggregate: True if the criterion expects net_output (src_tokens)
aggregated across time axis
"""
predicted_idx = 0 if correct_prediction else 1
if aggregate:
src_tokens = torch.zeros((2, 2), dtype=torch.float)
for b in range(2):
src_tokens[b][predicted_idx] = 1.0
else:
src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
for b in range(2):
for t in range(10):
src_tokens[b][t][predicted_idx] = 1.0
return src_tokens
def get_target(self, soft_target):
if soft_target:
target = torch.zeros((2, 2), dtype=torch.float)
for b in range(2):
target[b][0] = 1.0
else:
target = torch.zeros((2, 10), dtype=torch.long)
return target
def get_test_sample(self, correct, soft_target, aggregate):
src_tokens = self.get_src_tokens(correct, aggregate)
target = self.get_target(soft_target)
L = src_tokens.size(1)
return {
"net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])},
"target": target,
"ntokens": src_tokens.size(0) * src_tokens.size(1),
}
| data2vec_vision-main | deltalm/src/tests/speech_recognition/asr_test_base.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from examples.speech_recognition.data.collaters import Seq2SeqCollater
class TestSeq2SeqCollator(unittest.TestCase):
def test_collate(self):
eos_idx = 1
pad_idx = 0
collater = Seq2SeqCollater(
feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx
)
# 2 frames in the first sample and 3 frames in the second one
frames1 = np.array([[7, 8], [9, 10]])
frames2 = np.array([[1, 2], [3, 4], [5, 6]])
target1 = np.array([4, 2, 3, eos_idx])
target2 = np.array([3, 2, eos_idx])
sample1 = {"id": 0, "data": [frames1, target1]}
sample2 = {"id": 1, "data": [frames2, target2]}
batch = collater.collate([sample1, sample2])
# collate() sorts inputs by frame length before creating the batch
self.assertTensorEqual(batch["id"], torch.tensor([1, 0]))
self.assertEqual(batch["ntokens"], 7)
self.assertTensorEqual(
batch["net_input"]["src_tokens"],
torch.tensor(
[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]
),
)
self.assertTensorEqual(
batch["net_input"]["prev_output_tokens"],
torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]),
)
self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2]))
self.assertTensorEqual(
batch["target"],
torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]),
)
self.assertEqual(batch["nsentences"], 2)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| data2vec_vision-main | deltalm/src/tests/speech_recognition/test_collaters.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from examples.speech_recognition.criterions.cross_entropy_acc import (
CrossEntropyWithAccCriterion,
)
from .asr_test_base import CrossEntropyCriterionTestBase
class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase):
def setUp(self):
self.criterion_cls = CrossEntropyWithAccCriterion
super().setUp()
def test_cross_entropy_all_correct(self):
sample = self.get_test_sample(correct=True, soft_target=False, aggregate=False)
loss, sample_size, logging_output = self.criterion(
self.model, sample, "sum", log_probs=True
)
assert logging_output["correct"] == 20
assert logging_output["total"] == 20
assert logging_output["sample_size"] == 20
assert logging_output["ntokens"] == 20
def test_cross_entropy_all_wrong(self):
sample = self.get_test_sample(correct=False, soft_target=False, aggregate=False)
loss, sample_size, logging_output = self.criterion(
self.model, sample, "sum", log_probs=True
)
assert logging_output["correct"] == 0
assert logging_output["total"] == 20
assert logging_output["sample_size"] == 20
assert logging_output["ntokens"] == 20
| data2vec_vision-main | deltalm/src/tests/speech_recognition/test_cross_entropy.py |
#!/usr/bin/env python3
# import models/encoder/decoder to be tested
from examples.speech_recognition.models.vggtransformer import (
TransformerDecoder,
VGGTransformerEncoder,
VGGTransformerModel,
vggtransformer_1,
vggtransformer_2,
vggtransformer_base,
)
# import base test class
from .asr_test_base import (
DEFAULT_TEST_VOCAB_SIZE,
TestFairseqDecoderBase,
TestFairseqEncoderBase,
TestFairseqEncoderDecoderModelBase,
get_dummy_dictionary,
get_dummy_encoder_output,
get_dummy_input,
)
class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
vggtransformer_1 uses 14 layers of transformer; for testing purposes
this is too expensive. For a fast turn-around test, reduce the number
of layers to 3.
"""
args.transformer_enc_config = (
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_1, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
vggtransformer_2 uses 16 layers of transformer; for testing purposes
this is too expensive. For a fast turn-around test, reduce the number
of layers to 3.
"""
args.transformer_enc_config = (
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_2, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
vggtransformer_base uses 12 layers of transformer; for testing purposes
this is too expensive. For a fast turn-around test, reduce the number
of layers to 3.
"""
args.transformer_enc_config = (
"((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_base, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerEncoderTest(TestFairseqEncoderBase):
def setUp(self):
super().setUp()
self.setUpInput(get_dummy_input(T=50, D=80, B=5))
def test_forward(self):
print("1. test standard vggtransformer")
self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80))
super().test_forward()
print("2. test vggtransformer with limited right context")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80, transformer_context=(-1, 5)
)
)
super().test_forward()
print("3. test vggtransformer with limited left context")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80, transformer_context=(5, -1)
)
)
super().test_forward()
print("4. test vggtransformer with limited right context and sampling")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80,
transformer_context=(-1, 12),
transformer_sampling=(2, 2),
)
)
super().test_forward()
print("5. test vggtransformer with windowed context and sampling")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80,
transformer_context=(12, 12),
transformer_sampling=(2, 2),
)
)
class TransformerDecoderTest(TestFairseqDecoderBase):
def setUp(self):
super().setUp()
dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE)
decoder = TransformerDecoder(dict)
dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256))
self.setUpDecoder(decoder)
self.setUpInput(dummy_encoder_output)
self.setUpPrevOutputTokens()
| data2vec_vision-main | deltalm/src/tests/speech_recognition/test_vggtransformer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from examples.speech_recognition.data import data_utils
class DataUtilsTest(unittest.TestCase):
def test_normalization(self):
sample_len1 = torch.tensor(
[
[
-0.7661,
-1.3889,
-2.0972,
-0.9134,
-0.7071,
-0.9765,
-0.8700,
-0.8283,
0.7512,
1.3211,
2.1532,
2.1174,
1.2800,
1.2633,
1.6147,
1.6322,
2.0723,
3.1522,
3.2852,
2.2309,
2.5569,
2.2183,
2.2862,
1.5886,
0.8773,
0.8725,
1.2662,
0.9899,
1.1069,
1.3926,
1.2795,
1.1199,
1.1477,
1.2687,
1.3843,
1.1903,
0.8355,
1.1367,
1.2639,
1.4707,
]
]
)
out = data_utils.apply_mv_norm(sample_len1)
assert not torch.isnan(out).any()
assert (out == sample_len1).all()
| data2vec_vision-main | deltalm/src/tests/speech_recognition/test_data_utils.py |
| data2vec_vision-main | deltalm/src/tests/speech_recognition/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This version is simpler than the latest PyTorch
version and is useful for debugging. Notably it does not overlap gradient
communication with the backward pass, which makes it slower but more robust
than the PyTorch version.
This version also supports the *no_sync* context manager, which allows faster
training with `--update-freq`.
"""
import copy
from collections import OrderedDict
from contextlib import contextmanager
import torch
from torch import nn
from torch.autograd import Variable
from . import distributed_utils
class LegacyDistributedDataParallel(nn.Module):
"""Implements distributed data parallelism at the module level.
A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
This version uses a c10d process group for communication and does not
broadcast buffers.
Args:
module (~torch.nn.Module): module to be parallelized
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
buffer_size (int, optional): number of elements to buffer before
performing all-reduce (default: 256M).
"""
def __init__(self, module, process_group, buffer_size=2 ** 28):
super().__init__()
self.module = module
self.process_group = process_group
self.world_size = distributed_utils.get_world_size(self.process_group)
# Never use a bigger buffer than the number of model params
self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
self.buffer = None
# We can also forcibly accumulate grads locally and only do the
# all-reduce at some later time
self.accumulate_grads = False
# make per-device lists of parameters
paramlists = OrderedDict()
for param in self.module.parameters():
device = param.device
if paramlists.get(device) is None:
paramlists[device] = []
paramlists[device] += [param]
self.per_device_params = list(paramlists.values())
def __getstate__(self):
attrs = copy.copy(self.__dict__)
return attrs
def __setstate__(self, state):
super().__setstate__(state)
@contextmanager
def no_sync(self):
"""A context manager to disable gradient synchronization."""
old_accumulate_grads = self.accumulate_grads
self.accumulate_grads = True
yield
self.accumulate_grads = old_accumulate_grads
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def all_reduce_grads(self):
"""
This function must be called explicitly after backward to reduce
gradients. There is no automatic hook like c10d.
"""
def all_reduce_params(params):
buffer = self.buffer
nonzero_buffer = False
if len(params) > 1:
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
buffer[offset : offset + sz].copy_(p.grad.data.view(-1))
nonzero_buffer = True
else:
buffer[offset : offset + sz].zero_()
offset += sz
else:
# we only have a single grad to all-reduce
p = params[0]
if p.grad is not None:
buffer = p.grad.data
nonzero_buffer = True
elif p.numel() <= self.buffer.numel():
buffer = buffer[: p.numel()]
buffer.zero_()
else:
buffer = torch.zeros_like(p)
if nonzero_buffer:
buffer.div_(self.world_size)
distributed_utils.all_reduce(buffer, self.process_group)
# copy all-reduced grads back into their original place
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
p.grad.data.copy_(buffer[offset : offset + sz].view_as(p))
else:
p.grad = buffer[offset : offset + sz].view_as(p).clone()
offset += sz
def reduction_fn():
# This function only needs to be called once
if self.accumulate_grads:
return
if self.buffer is None:
self.buffer = next(self.module.parameters()).new(self.buffer_size)
for params in self.per_device_params:
# All-reduce the gradients in buckets
offset = 0
buffered_params = []
for param in params:
if not param.requires_grad:
continue
if param.grad is None:
param.grad = torch.zeros_like(param)
if param.grad.requires_grad:
raise RuntimeError(
"DistributedDataParallel only works "
"with gradients that don't require "
"grad"
)
sz = param.numel()
if sz > self.buffer.numel():
# all-reduce big params directly
all_reduce_params([param])
else:
if offset + sz > self.buffer.numel():
all_reduce_params(buffered_params)
offset = 0
buffered_params.clear()
buffered_params.append(param)
offset += sz
if len(buffered_params) > 0:
all_reduce_params(buffered_params)
reduction_fn()
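# --- Illustrative sketch (not part of the original module) ---
# A minimal, assumed usage pattern showing how no_sync() and
# all_reduce_grads() combine to emulate `--update-freq` gradient
# accumulation. `module`, `process_group`, `batches`, `optimizer` and the
# assumption that the wrapped module returns a scalar loss are placeholders
# for illustration only.
def _example_update_freq_loop(module, process_group, batches, optimizer, update_freq=2):
    ddp = LegacyDistributedDataParallel(module, process_group)
    for i, batch in enumerate(batches):
        last_in_group = (i + 1) % update_freq == 0
        if not last_in_group:
            # accumulate gradients locally; the all-reduce is skipped
            with ddp.no_sync():
                ddp(**batch).backward()
        else:
            ddp(**batch).backward()
            ddp.all_reduce_grads()  # one all-reduce per update_freq steps
            optimizer.step()
            optimizer.zero_grad()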
| data2vec_vision-main | deltalm/src/fairseq/legacy_distributed_data_parallel.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
add_checkpoint_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix (also used to build dictionaries)")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationConfig())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointConfig())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, GenerationConfig())
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
gen_parser_from_dataclass(group, InteractiveConfig())
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
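# --- Illustrative sketch (not part of the original module) ---
# A minimal, assumed example of the usual two-stage parsing flow for a
# training command line. The "--arch transformer" value and the "data-bin"
# path are placeholders; the exact flags accepted depend on the tasks,
# models and registries available at runtime.
def _example_build_training_args(argv=("data-bin", "--arch", "transformer")):
    parser = get_training_parser(default_task="translation")
    # parse_args_and_arch() parses twice so that model-, task- and
    # registry-specific arguments added via add_args() reach the namespace.
    return parse_args_and_arch(parser, input_args=list(argv))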
| data2vec_vision-main | deltalm/src/fairseq/options.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import numpy as np
import torch
from fairseq import utils
DecoderOut = namedtuple(
"IterativeRefinementDecoderOut",
["output_tokens", "output_scores", "attn", "step", "max_step", "history"],
)
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
eos_penalty: if > 0.0, it penalizes early stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
retain_dropout: retain dropout during inference
adaptive: decode with early stopping
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the IterativeRefinementGenerator is not supported"
)
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert (
self.beam_size > 1
), "Reranking requires multiple translation for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, "enable_ensemble"):
assert model.allow_ensemble, "{} does not support ensembling".format(
model.__class__.__name__
)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
# initialize
encoder_out = model.forward_encoder([src_tokens, src_lengths])
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
if self.beam_size > 1:
assert (
model.allow_length_beam
), "{} does not support decoding with length beam.".format(
model.__class__.__name__
)
# regenerate data based on length-beam
length_beam_order = (
utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, length_beam_order
)
prev_decoder_out = model.regenerate_length_beam(
prev_decoder_out, self.beam_size
)
bsz = bsz * self.beam_size
sent_idxs = torch.arange(bsz)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens,
decoder_out.output_tokens,
decoder_out.output_scores,
decoder_out.attn,
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(
decoder_out.output_tokens.size(0)
).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None
if (decoder_out.attn is None or decoder_out.attn.size(0) == 0)
else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]["history"] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]["history"].append(
finalized_hypos(
step, finalized_history_tokens[j][i], None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated]
if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
else None,
history=[h[not_terminated] for h in decoder_out.history]
if decoder_out.history is not None
else None,
)
encoder_out = model.encoder.reorder_encoder_out(
encoder_out, not_terminated.nonzero(as_tuple=False).squeeze()
)
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.beam_size > 1:
if reranker is not None:
finalized = self.rerank(
reranker, finalized, [src_tokens, src_lengths], self.beam_size
)
# aggregate information from length beam
finalized = [
finalized[
np.argmax(
[
finalized[self.beam_size * i + j][0]["score"]
for j in range(self.beam_size)
]
)
+ self.beam_size * i
]
for i in range(len(finalized) // self.beam_size)
]
return finalized
def rerank(self, reranker, finalized, encoder_input, beam_size):
def rebuild_batch(finalized):
finalized_tokens = [f[0]["tokens"] for f in finalized]
finalized_maxlen = max(f.size(0) for f in finalized_tokens)
final_output_tokens = (
finalized_tokens[0]
.new_zeros(len(finalized_tokens), finalized_maxlen)
.fill_(self.pad)
)
for i, f in enumerate(finalized_tokens):
final_output_tokens[i, : f.size(0)] = f
return final_output_tokens
final_output_tokens = rebuild_batch(finalized)
final_output_tokens[
:, 0
] = self.eos # autoregressive model assumes starting with EOS
reranker_encoder_out = reranker.encoder(*encoder_input)
length_beam_order = (
utils.new_arange(
final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)
)
.t()
.reshape(-1)
)
reranker_encoder_out = reranker.encoder.reorder_encoder_out(
reranker_encoder_out, length_beam_order
)
reranking_scores = reranker.get_normalized_probs(
reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out),
True,
None,
)
reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
reranking_scores = (
reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
)
reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(
reranking_scores
)
for i in range(len(finalized)):
finalized[i][0]["score"] = reranking_scores[i]
return finalized
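# --- Illustrative sketch (not part of the original module) ---
# A minimal, assumed decoding call. `models`, `tgt_dict` and `sample` are
# placeholders supplied by the caller; `sample` follows the layout expected
# by generate() above ("net_input" with "src_tokens"/"src_lengths", etc.).
def _example_iterative_refinement_decode(models, tgt_dict, sample):
    generator = IterativeRefinementGenerator(tgt_dict, models=models, max_iter=9)
    hypos = generator.generate(models, sample)
    # each sentence gets a single refined hypothesis; return its tokens
    return [h[0]["tokens"] for h in hypos]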
| data2vec_vision-main | deltalm/src/fairseq/iterative_refinement_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger = logging.getLogger(__name__)
class NanDetector:
"""
Detects the first NaN or Inf in forward and/or backward pass and logs, together with the module name
"""
def __init__(self, model, forward=True, backward=True):
self.bhooks = []
self.fhooks = []
self.forward = forward
self.backward = backward
self.named_parameters = list(model.named_parameters())
self.reset()
for name, mod in model.named_modules():
mod.__module_name = name
self.add_hooks(mod)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
# Dump out all model gnorms to enable better debugging
norm = {}
gradients = {}
for name, param in self.named_parameters:
if param.grad is not None:
grad_norm = torch.norm(param.grad.data, p=2, dtype=torch.float32)
norm[name] = grad_norm.item()
if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any():
gradients[name] = param.grad.data
if len(gradients) > 0:
logger.info("Detected nan/inf grad norm, dumping norms...")
logger.info(f"norms: {norm}")
logger.info(f"gradients: {gradients}")
self.close()
def add_hooks(self, module):
if self.forward:
self.fhooks.append(module.register_forward_hook(self.fhook_fn))
if self.backward:
self.bhooks.append(module.register_backward_hook(self.bhook_fn))
def reset(self):
self.has_printed_f = False
self.has_printed_b = False
def _detect(self, tensor, name, backward):
err = None
if (
torch.is_floating_point(tensor)
# single value tensors (like the loss) will not provide much info
and tensor.numel() >= 2
):
with torch.no_grad():
if torch.isnan(tensor).any():
err = "NaN"
elif torch.isinf(tensor).any():
err = "Inf"
if err is not None:
err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}"
return err
def _apply(self, module, inp, x, backward):
if torch.is_tensor(x):
if isinstance(inp, tuple) and len(inp) > 0:
inp = inp[0]
err = self._detect(x, module.__module_name, backward)
if err is not None:
if torch.is_tensor(inp) and not backward:
err += (
f" input max: {inp.max().item()}, input min: {inp.min().item()}"
)
has_printed_attr = "has_printed_b" if backward else "has_printed_f"
logger.warning(err)
setattr(self, has_printed_attr, True)
elif isinstance(x, dict):
for v in x.values():
self._apply(module, inp, v, backward)
elif isinstance(x, list) or isinstance(x, tuple):
for v in x:
self._apply(module, inp, v, backward)
def fhook_fn(self, module, inp, output):
if not self.has_printed_f:
self._apply(module, inp, output, backward=False)
def bhook_fn(self, module, inp, output):
if not self.has_printed_b:
self._apply(module, inp, output, backward=True)
def close(self):
for hook in self.fhooks + self.bhooks:
hook.remove()
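# --- Illustrative sketch (not part of the original module) ---
# NanDetector is a context manager, so an assumed training-step usage looks
# like the following; `model`, `batch` and `loss_fn` are placeholders.
def _example_detect_nans(model, batch, loss_fn):
    with NanDetector(model, forward=True, backward=True):
        output = model(**batch)  # forward hooks inspect module outputs
        loss = loss_fn(output)
        loss.backward()          # backward hooks inspect gradients
    return loss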
| data2vec_vision-main | deltalm/src/fairseq/nan_detector.py |
__version__ = "1.0.0a0"
| data2vec_vision-main | deltalm/src/fairseq/version.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
from typing import Union
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import populate_dataclass, merge_with_parent
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig
REGISTRIES = {}
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(DATACLASS_REGISTRY[choice](), cfg)
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group=registry_name, node=node, provider="fairseq")
REGISTRY[name] = cls
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
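# --- Illustrative sketch (not part of the original module) ---
# An assumed example of creating and using a new registry. The
# "--example-tokenizer" name and SpaceTokenizer class are hypothetical and
# exist only for this illustration.
def _example_registry_usage():
    # setup_registry() should only run once per registry name; a repeated
    # call with the same name returns None instead of the tuple below.
    build_tok, register_tok, _, _ = setup_registry("--example-tokenizer", default=None)

    @register_tok("space")
    class SpaceTokenizer:
        def __init__(self, cfg):
            self.cfg = cfg

    # build_x() accepts a DictConfig, a plain choice string, or a Namespace.
    return build_tok("space")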
| data2vec_vision-main | deltalm/src/fairseq/registry.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import os
import sys
try:
from .version import __version__ # noqa
except ImportError:
version_txt = os.path.join(os.path.dirname(__file__), "version.txt")
with open(version_txt) as f:
__version__ = f.read().strip()
__all__ = ["pdb"]
# backwards compatibility to support `from fairseq.meters import AverageMeter`
from fairseq.logging import meters, metrics, progress_bar # noqa
sys.modules["fairseq.meters"] = meters
sys.modules["fairseq.metrics"] = metrics
sys.modules["fairseq.progress_bar"] = progress_bar
# initialize hydra
from fairseq.dataclass.initialize import hydra_init
hydra_init()
import fairseq.criterions # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.scoring # noqa
import fairseq.tasks # noqa
import fairseq.token_generation_constraints # noqa
import fairseq.benchmark # noqa
import fairseq.model_parallel # noqa
| data2vec_vision-main | deltalm/src/fairseq/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs):
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text, i.e. the character length excluding EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception("expected src_tokens or source in net input")
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (e.g. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
self.model.max_decoder_positions() - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
)  # contains lists of dictionaries of information about the hypothesis being finalized at each step
finished = [
False for i in range(bsz)
] # a boolean array indicating if the sentence at the index is finished or not
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens).to(src_tokens.device)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
src_lang_id=sample["net_input"].get("src_lang_id", None),
tgt_lang_id=sample["net_input"].get("tgt_lang_id", None),
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.no_repeat_ngram_size > 0:
lprobs = self._no_repeat_ngram(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
        # gets the rows that just reached EOS, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
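        # Worked example (added for clarity, illustrative values only): if
        # finished == [False, True, False, True, False], then cum_unfin == [0, 1, 2],
        # and a reduced-batch index unfin_idx maps back to the original batch index via
        # sent = unfin_idx + cum_unfin[unfin_idx], i.e. 0 -> 0, 1 -> 2, 2 -> 4.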
# The keys here are of the form "{sent}_{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# set() is not supported in script export
sents_seen: Dict[str, Optional[Tensor]] = {}
# For every finished beam item
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
# sentence index in the current (possibly reduced) batch
unfin_idx = idx // beam_size
# sentence index in the original (unreduced) batch
sent = unfin_idx + cum_unfin[unfin_idx]
# Cannot create dict for key type '(int, int)' in torchscript.
# The workaround is to cast int to string
seen = str(sent.item()) + "_" + str(unfin_idx.item())
if seen not in sents_seen:
sents_seen[seen] = None
if self.match_source_len and step > src_lengths[unfin_idx]:
score = torch.tensor(-math.inf).to(score)
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append(
{
"tokens": tokens_clone[i],
"score": score,
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for seen in sents_seen.keys():
# check termination conditions for this sentence
sent: int = int(float(seen.split("_")[0]))
unfin_idx: int = int(float(seen.split("_")[1]))
if not finished[sent] and self.is_finished(
step, unfin_idx, max_len, len(finalized[sent]), beam_size
):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
def calculate_banned_tokens(
self,
tokens,
step: int,
gen_ngrams: List[Dict[str, List[int]]],
no_repeat_ngram_size: int,
bbsz_idx: int,
):
tokens_list: List[int] = tokens[
bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1
].tolist()
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = ",".join([str(x) for x in tokens_list])
return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], []))
def transpose_list(self, l: List[List[int]]):
# GeneratorExp aren't supported in TS so ignoring the lint
min_len = min([len(x) for x in l]) # noqa
l2 = [[row[i] for row in l] for i in range(min_len)]
return l2
def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams: List[Dict[str, List[int]]] = [
torch.jit.annotate(Dict[str, List[int]], {})
for bbsz_idx in range(bsz * beam_size)
]
cpu_tokens = tokens.cpu()
for bbsz_idx in range(bsz * beam_size):
gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist()
for ngram in self.transpose_list(
[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]
):
key = ",".join([str(x) for x in ngram[:-1]])
gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get(
key, torch.jit.annotate(List[int], [])
) + [ngram[-1]]
if step + 2 - self.no_repeat_ngram_size >= 0:
            # banned tokens can only be looked up once the index of the first prefix token
            # (step + 2 - no_repeat_ngram_size) is valid; before that, nothing is banned
banned_tokens = [
self.calculate_banned_tokens(
tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx
)
for bbsz_idx in range(bsz * beam_size)
]
else:
banned_tokens = [
torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)
]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx][
torch.tensor(banned_tokens[bbsz_idx]).long()
] = torch.tensor(-math.inf).to(lprobs)
return lprobs
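# The helper below is an illustrative sketch only (it is not called anywhere in this
# module): it re-creates, for a single toy beam item with made-up token ids, the
# prefix -> next-token bookkeeping that _no_repeat_ngram and calculate_banned_tokens
# above perform on batched tensors.
def _example_no_repeat_ngram_bookkeeping():
    no_repeat_ngram_size = 2
    generated = [5, 7, 5, 9, 5]  # tokens generated so far for one beam item
    # map each (no_repeat_ngram_size - 1)-token prefix to the tokens that followed it
    gen_ngrams = {}
    for ngram in zip(*[generated[i:] for i in range(no_repeat_ngram_size)]):
        key = ",".join(str(x) for x in ngram[:-1])
        gen_ngrams.setdefault(key, []).append(ngram[-1])
    # the prefix for the next step is the last (no_repeat_ngram_size - 1) tokens
    prefix = ",".join(str(x) for x in generated[len(generated) - no_repeat_ngram_size + 1 :])
    banned = gen_ngrams.get(prefix, [])
    # bigrams 5->7 and 5->9 already occurred, so tokens 7 and 9 would get -inf log-probs
    assert gen_ngrams == {"5": [7, 9], "7": [5], "9": [5]}
    assert banned == [7, 9]
    return banned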
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
        src_lang_id=None,
        tgt_lang_id=None,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
if src_lang_id is not None and tgt_lang_id is not None:
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
                        src_lang_id=src_lang_id,
                        tgt_lang_id=tgt_lang_id
)
else:
                    decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out, src_lang_id=src_lang_id, tgt_lang_id=tgt_lang_id)
else:
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
if len(decoder_out[0].size()) == 4:
decoder_out_tuple = (
decoder_out[0][:, :, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
else:
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
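    # Note added for clarity: logsumexp over the stacked log-probs minus log(models_size)
    # equals the log of the arithmetic mean of the ensemble's probabilities,
    # i.e. log((1 / N) * sum_i p_i).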
@torch.jit.export
def reorder_encoder_out(self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(self, models, tgt_dict, left_pad_target=False, **kwargs):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
            left_pad_target (bool, optional): Whether the hypotheses should be
                left-padded when they are teacher-forced for generating
                alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = utils.extract_hard_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
| data2vec_vision-main | deltalm/src/fairseq/sequence_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ["set_trace"]
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
_stdin_fd = sys.stdin.fileno()
except Exception:
_stdin_fd = None
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with _stdin_lock:
try:
if _stdin_fd is not None:
if not _stdin[0]:
_stdin[0] = os.fdopen(_stdin_fd)
sys.stdin = _stdin[0]
self.cmdloop()
finally:
sys.stdin = stdin_bak
def set_trace():
pdb = MultiprocessingPdb()
pdb.set_trace(sys._getframe().f_back)
| data2vec_vision-main | deltalm/src/fairseq/pdb.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
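# Illustrative sketch only (not part of the original module): runs of whitespace,
# including tabs and newlines, collapse to single spaces before the final split.
def _example_tokenize_line():
    assert tokenize_line("  hello \t world\n") == ["hello", "world"]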
| data2vec_vision-main | deltalm/src/fairseq/tokenizer.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import Any, Dict, Iterator, List
import torch
from fairseq import utils
from fairseq.data import encoders
from omegaconf import open_dict
from torch import nn
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == "checkpoint_file":
checkpoint_file = v
elif (
k != "path"
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path["path"]
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith("."):
kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs["data"] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
"code": "bpe_codes",
"bpecodes": "bpe_codes",
"sentencepiece.bpe.model": "sentencepiece_model",
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if "user_dir" in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"]))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
"args": args,
"task": task,
"models": models,
}
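# Hedged usage sketch (comments only; the directory below is a placeholder, not a file
# shipped with this repo): the dict returned by from_pretrained is typically wrapped in
# the GeneratorHubInterface defined next.
#
#   x = from_pretrained("/path/to/model_dir", checkpoint_file="model.pt")
#   hub = GeneratorHubInterface(x["args"], x["task"], x["models"])
#   print(hub.translate("Hello world!"))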
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, cfg, task, models):
super().__init__()
self.cfg = cfg
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(cfg)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(cfg.generation.replace_unk)
self.tokenizer = encoders.build_tokenizer(cfg.tokenizer)
self.bpe = encoders.build_bpe(cfg.bpe)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(
self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs
) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(
self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs
) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos]
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [
hypos[0]
for hypos in self.generate(
tokenized_sentences, score_reference=True, **kwargs
)
]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.copy(self.cfg.generation)
with open_dict(gen_args):
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.cfg, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info("S\t{}".format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo["tokens"])
logger.info("H\t{}\t{}".format(hypo["score"], hypo_str))
logger.info(
"P\t{}".format(
" ".join(
map(
lambda x: "{:.4f}".format(x),
hypo["positional_scores"].tolist(),
)
)
)
)
if hypo["alignment"] is not None and getarg(
"print_alignment", False
):
logger.info(
"A\t{}".format(
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in hypo["alignment"]
]
)
)
)
return outputs
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
| data2vec_vision-main | deltalm/src/fairseq/hub_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import torch
from fairseq import utils
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(
self,
tgt_dict,
softmax_batch=None,
compute_alignment=False,
eos=None,
symbols_to_strip_from_output=None,
):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos() if eos is None else eos
self.softmax_batch = softmax_batch or sys.maxsize
assert self.softmax_batch > 0
self.compute_alignment = compute_alignment
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample["target"]
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model(**net_input)
attn = decoder_out[1] if len(decoder_out) > 1 else None
if type(attn) is dict:
attn = attn.get("attn", None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample["target"] = tgt
curr_prob = model.get_normalized_probs(
bd, log_probs=len(models) == 1, sample=sample
).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(
curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt
)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample["target"] = orig_target
probs = probs.view(sample["target"].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None:
if torch.is_tensor(attn):
attn = attn.data
else:
attn = attn[0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = (
utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad)
if sample["target"] is not None
else None
)
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
if self.compute_alignment:
alignment = utils.extract_hard_alignment(
avg_attn_i,
sample["net_input"]["src_tokens"][i],
sample["target"][i],
self.pad,
self.eos,
)
else:
alignment = None
else:
avg_attn_i = alignment = None
hypos.append(
[
{
"tokens": ref,
"score": score_i,
"attention": avg_attn_i,
"alignment": alignment,
"positional_scores": avg_probs_i,
}
]
)
return hypos
| data2vec_vision-main | deltalm/src/fairseq/sequence_scorer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
from typing import Dict, Optional
from torch import Tensor
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(
b for b in cls.__bases__ if b != FairseqIncrementalState
)
return cls
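# Hedged usage sketch (comments only, not used elsewhere in this file): applying the
# decorator injects FairseqIncrementalState as a base class, so a module can stash and
# retrieve per-instance state under a UUID-scoped key.
#
#   @with_incremental_state
#   class ToyLayer(torch.nn.Module):
#       def forward(self, x, incremental_state=None):
#           prev = self.get_incremental_state(incremental_state, "cache")  # None on first call
#           self.set_incremental_state(incremental_state, "cache", {"prev_x": x})
#           return x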
| data2vec_vision-main | deltalm/src/fairseq/incremental_decoding_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import torch
import torch.nn.functional as F
from fairseq.data import iterators
from fairseq.file_io import PathManager
from fairseq.logging.meters import safe_round
from fairseq.modules import gelu, gelu_accurate
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
class FileContentsAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(FileContentsAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if PathManager.isfile(values):
with PathManager.open(values) as f:
argument = f.read().strip()
else:
argument = values
setattr(namespace, self.dest, argument)
def split_paths(paths: str) -> List[str]:
return (
paths.split(os.pathsep)
if "://" not in paths
else paths.split(MANIFOLD_PATH_SEP)
)
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
"utils.load_ensemble_for_inference is deprecated. "
"Please use checkpoint_utils.load_model_ensemble instead."
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task
)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def get_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return " ".join(hypo_tokens)
def post_process_prediction(
hypo_tokens,
src_str,
alignment,
align_dict,
tgt_dict,
remove_bpe=None,
extra_symbols_to_ignore=None,
):
hypo_str = tgt_dict.string(
hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
)
if align_dict is not None:
hypo_str = replace_unk(
hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
)
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
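# Illustrative sketch only (not part of the fairseq API): padding positions keep
# padding_idx and real tokens are numbered from padding_idx + 1.
def _example_make_positions():
    import torch
    tokens = torch.tensor([[1, 1, 9, 9, 9]])  # 1 plays the role of the padding index
    assert make_positions(tokens, padding_idx=1).tolist() == [[1, 1, 2, 3, 4]]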
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(
src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
buffered = torch.empty(0).long()
if max_len > 0:
torch.arange(max_len, out=buffered)
range = buffered.type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
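# Illustrative sketch only (not used elsewhere): converts a left-padded toy batch to
# right padding with the helper above; padding_idx=1 is an arbitrary choice.
def _example_convert_padding_direction():
    import torch
    left_padded = torch.tensor([[1, 1, 7, 8], [1, 5, 6, 9]])
    right_padded = convert_padding_direction(left_padded, padding_idx=1, left_to_right=True)
    assert right_padded.tolist() == [[7, 8, 1, 1], [5, 6, 9, 1]]
    return right_padded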
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(
chunk_size, has_inf, [cur_device_grads], False
)
norms.append(norm[0].to(torch.cuda.current_device()))
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.0)
else:
return torch.tensor(0.0)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads:
g.mul_(clip_coef)
return total_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
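# Illustrative sketch only: scalar limits are broadcast against tuple limits via
# _match_types, None arguments are skipped, and the element-wise minimum wins.
def _example_resolve_max_positions():
    assert resolve_max_positions((1024, 1024), 512, None) == (512, 512)
    assert resolve_max_positions(None, 4096) == 4096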
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path):
fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
raise FileNotFoundError(module_path)
# ensure that user modules are only imported once
import_user_module.memo = getattr(import_user_module, "memo", set())
if module_path not in import_user_module.memo:
import_user_module.memo.add(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
else:
raise ImportError(
"Failed to import --user-dir={} because the corresponding module name "
"({}) is not globally unique. Please rename the directory to "
"something unique and try again.".format(module_path, module_name)
)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
if loss is None:
return 0.0
try:
return safe_round(base ** loss, round)
except OverflowError:
return float("inf")
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"tanh",
"linear",
]
@contextlib.contextmanager
def model_eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def get_rng_state():
state = {"torch_rng_state": torch.get_rng_state()}
if xm is not None:
state["xla_rng_state"] = xm.get_rng_state()
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state):
torch.set_rng_state(state["torch_rng_state"])
if xm is not None:
xm.set_rng_state(state["xla_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = get_rng_state()
torch.manual_seed(seed)
if xm is not None:
xm.set_rng_state(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def __enter__(self):
return self
def __exit__(self, *exc):
set_rng_state(self.rng_state)
def parse_alignment(line):
"""
    Parses a single line from the alignment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split("-")
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
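# Illustrative sketch only: each "src-tgt" pair is flattened into the packed tensor.
def _example_parse_alignment():
    assert parse_alignment("0-0 1-2 2-1").tolist() == [0, 0, 1, 2, 2, 1]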
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = (
((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_invalid = (
((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
If size is empty, using the size of the variable x.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
def get_tpu_device():
return xm.xla_device()
def tpu_data_loader(itr):
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
xm.rendezvous("tpu_data_loader") # wait for all workers
xm.mark_step()
device = xm.xla_device()
return iterators.CountingIterator(
pl.ParallelLoader(itr, [device]).per_device_loader(device),
start=getattr(itr, "n", 0),
total=len(itr),
)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
        Given a list of CudaEnvironment objects, pretty print them
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
def csv_str_list(x):
return x.split(",")
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
| data2vec_vision-main | deltalm/src/fairseq/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import collections
import contextlib
import logging
import os
import re
import traceback
from collections import OrderedDict
from typing import Optional, Union
import torch
from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
overwrite_args_by_name,
)
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig, open_dict
logger = logging.getLogger(__name__)
def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):
from fairseq import meters
# only one worker should attempt to create the required dir
if cfg.distributed_rank == 0:
os.makedirs(cfg.save_dir, exist_ok=True)
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if cfg.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if cfg.no_save:
return
trainer.consolidate_optimizer()
if not trainer.is_data_parallel_master:
return
def is_better(a, b):
return a >= b if cfg.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
suffix = cfg.checkpoint_suffix or ""
checkpoint_conds = collections.OrderedDict()
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not end_of_epoch
and cfg.save_interval_updates > 0
and updates % cfg.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
if val_loss is not None and cfg.keep_best_checkpoints > 0:
checkpoint_conds[
"checkpoint.best_{}_{:.2f}.pt".format(cfg.best_checkpoint_metric, val_loss)
] = not hasattr(save_checkpoint, "best") or is_better(
val_loss, save_checkpoint.best
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not cfg.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
checkpoints = [
os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
PathManager.copy(checkpoints[0], cp, overwrite=True)
write_timer.stop()
logger.info(
"saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and cfg.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
cfg.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[cfg.keep_interval_updates :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if cfg.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(cfg.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[cfg.keep_last_epochs :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if cfg.keep_best_checkpoints > 0:
# only keep the best N checkpoints according to validation metric
checkpoints = checkpoint_paths(
cfg.save_dir,
pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(
cfg.best_checkpoint_metric
),
)
if not cfg.maximize_best_checkpoint_metric:
checkpoints = checkpoints[::-1]
for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
reset_optimizer = cfg.reset_optimizer
reset_lr_scheduler = cfg.reset_lr_scheduler
optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
reset_meters = cfg.reset_meters
reset_dataloader = cfg.reset_dataloader
if cfg.finetune_from_model is not None and (
reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
):
raise ValueError(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
)
suffix = cfg.checkpoint_suffix
if (
cfg.restore_file == "checkpoint_last.pt"
): # default value of restore_file is 'checkpoint_last.pt'
checkpoint_path = os.path.join(
cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
)
first_launch = not PathManager.exists(checkpoint_path)
if cfg.finetune_from_model is not None and first_launch:
            # if there is no last checkpoint to restore, start fine-tuning from the pretrained model
            # else use the usual logic to load the checkpoint, e.g. restart from the last checkpoint
if PathManager.exists(cfg.finetune_from_model):
checkpoint_path = cfg.finetune_from_model
reset_optimizer = True
reset_lr_scheduler = True
reset_meters = True
reset_dataloader = True
logger.info(
f"loading pretrained model from {checkpoint_path}: "
"optimizer, lr scheduler, meters, dataloader will be reset"
)
else:
raise ValueError(
f"--funetune-from-model {cfg.finetune_from_model} does not exist"
)
elif cfg.model_parallel_size > 1:
checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
else:
checkpoint_path = cfg.restore_file
if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
raise ValueError(
"--finetune-from-model and --restore-file (non-default value) "
"can not be specified together: " + str(cfg)
)
extra_state = trainer.load_checkpoint(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
reset_meters=reset_meters,
)
if (
extra_state is not None
and "best" in extra_state
and not reset_optimizer
and not reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(
epoch=1, load_dataset=True, **passthrough_args
)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr
def load_checkpoint_to_cpu(path, arg_overrides=None):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility)."""
with open(PathManager.get_local_path(path), "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
if "args" in state and state["args"] is not None and arg_overrides is not None:
args = state["args"]
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
if "cfg" in state and state["cfg"] is not None and arg_overrides is not None:
overwrite_args_by_name(state["cfg"], arg_overrides)
#state = _upgrade_state_dict(state)
return state
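# Minimal usage sketch (added for illustration, not part of the original
# module): load_checkpoint_to_cpu keeps all tensors on CPU and lets callers
# override arguments that were stored in the checkpoint at training time.
# The path and the "data" override below are hypothetical placeholders.
def _example_load_checkpoint_with_overrides(path="/path/to/checkpoint_best.pt"):
    state = load_checkpoint_to_cpu(path, arg_overrides={"data": "/path/to/binarized-data"})
    # "model" holds the parameter state_dict; "cfg" (or legacy "args") holds the training config
    return state["model"], state.get("cfg", state.get("args"))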
def load_model_ensemble(
filenames,
arg_overrides=None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
"""Loads an ensemble of models.
Args:
filenames (List[str]): checkpoint files to load
arg_overrides (Dict[str,Any], optional): override model args that
were used during model training
task (fairseq.tasks.FairseqTask, optional): task to use for loading
"""
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble, args, _task = load_model_ensemble_and_task(
filenames,
arg_overrides,
task,
strict,
suffix,
num_shards,
state,
)
return ensemble, args
def load_transformer_model_ensemble(
filenames,
arg_overrides=None,
strict=True,
suffix="",
num_shards=1,
state=None,
src_dict=None,
tgt_dict=None
):
assert state is None or len(filenames) == 1
from fairseq.models.transformer import TransformerModel
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
cfg = None
for filename in filenames:
orig_filename = filename
assert num_shards > 0
for shard_idx in range(num_shards):
if num_shards == 1:
filename = filename.replace(".pt", suffix + ".pt")
else:
filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
if state is None:
state = load_checkpoint_to_cpu(filename, arg_overrides)
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
else:
raise RuntimeError(
f"Neither args nor cfg exist in state keys = {state.keys()}"
)
# build model for ensemble
model = TransformerModel.build_model_without_task(cfg.model, src_dict, tgt_dict)
state = expand_embedding_matrix(state, model)
model.load_state_dict(state["model"], strict=strict, model_cfg=cfg.model)
# reset state so it gets loaded for the next model in ensemble
state = None
ensemble.append(model)
return ensemble, cfg
def load_xlmt_model_ensemble(
filenames,
arg_overrides=None,
strict=True,
suffix="",
num_shards=1,
state=None,
src_dict=None,
tgt_dict=None
):
assert state is None or len(filenames) == 1
from fairseq.models.xlmt_decoder_variant import XLMTDecoderVariantModel
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
cfg = None
for filename in filenames:
orig_filename = filename
assert num_shards > 0
for shard_idx in range(num_shards):
if num_shards == 1:
filename = filename.replace(".pt", suffix + ".pt")
else:
filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
if state is None:
state = load_checkpoint_to_cpu(filename, arg_overrides)
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
else:
raise RuntimeError(
f"Neither args nor cfg exist in state keys = {state.keys()}"
)
# build model for ensemble
model = XLMTDecoderVariantModel.build_model_without_task(cfg.model, src_dict, tgt_dict)
state = expand_embedding_matrix(state, model)
model.load_state_dict(state["model"], strict=strict, model_cfg=cfg.model)
# reset state so it gets loaded for the next model in ensemble
state = None
ensemble.append(model)
return ensemble, cfg
def load_model_ensemble_and_task(
filenames,
arg_overrides=None,
task=None,
strict=True,
suffix="",
num_shards=1,
state=None,
):
assert state is None or len(filenames) == 1
from fairseq import tasks
assert not (
strict and num_shards > 1
), "Cannot load state dict with strict=True and checkpoint shards > 1"
ensemble = []
cfg = None
for filename in filenames:
orig_filename = filename
assert num_shards > 0
for shard_idx in range(num_shards):
if num_shards == 1:
filename = filename.replace(".pt", suffix + ".pt")
else:
filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
if state is None:
state = load_checkpoint_to_cpu(filename, arg_overrides)
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
else:
raise RuntimeError(
f"Neither args nor cfg exist in state keys = {state.keys()}"
)
if task is None:
task = tasks.setup_task(cfg.task)
# build model for ensemble
model = task.build_model(cfg.model)
state = expand_embedding_matrix(state, model)
model.load_state_dict(state["model"], strict=strict, model_cfg=cfg.model)
# reset state so it gets loaded for the next model in ensemble
state = None
ensemble.append(model)
return ensemble, cfg, task
def expand_embedding_matrix(state, model, init="zero"):
"""
:param state: Checkpoint
:param model: self.model
    :param init: 'zero' or 'random' for initialization
:return:
"""
embed_dim = model.encoder.embed_tokens.weight.size(1)
if state['model']['encoder.embed_tokens.weight'].size(0) < model.encoder.embed_tokens.weight.size(0):
offset = model.encoder.embed_tokens.weight.size(0) - state['model']['encoder.embed_tokens.weight'].size(0)
if init == "zero":
expanded_embed = state['model']['encoder.embed_tokens.weight'].new(offset, embed_dim).fill_(0)
elif init == "random":
expanded_embed = state['model']['encoder.embed_tokens.weight'].new(offset, embed_dim)
torch.nn.init.normal_(expanded_embed, mean=0, std=embed_dim ** -0.5)
state['model']['encoder.embed_tokens.weight'] = torch.cat([state['model']['encoder.embed_tokens.weight'], expanded_embed], dim=0)
state['model']['decoder.embed_tokens.weight'] = state['model']['encoder.embed_tokens.weight']
state['model']['decoder.output_projection.weight'] = state['model']['encoder.embed_tokens.weight']
logger.info("Expanding the embedding matrix to match the current shape...")
    if 'encoder.embed_positions.weight' in state['model'] and state['model']['encoder.embed_positions.weight'].size(0) < model.encoder.embed_positions.weight.size(0):
offset = model.encoder.embed_positions.weight.size(0) - state['model']['encoder.embed_positions.weight'].size(0)
expanded_encoder_pos = state['model']['encoder.embed_positions.weight'].new(offset, embed_dim)
torch.nn.init.normal_(expanded_encoder_pos, mean=0, std=embed_dim ** -0.5)
state['model']['encoder.embed_positions.weight'] = torch.cat([state['model']['encoder.embed_positions.weight'], expanded_encoder_pos], dim=0)
logger.info("Expanding the encoder positional embedding matrix to match the current shape...")
    if 'decoder.embed_positions.weight' in state['model'] and state['model']['decoder.embed_positions.weight'].size(0) < model.decoder.embed_positions.weight.size(0):
offset = model.decoder.embed_positions.weight.size(0) - state['model']['decoder.embed_positions.weight'].size(0)
expanded_decoder_pos = state['model']['decoder.embed_positions.weight'].new(offset, embed_dim)
torch.nn.init.normal_(expanded_decoder_pos, mean=0, std=embed_dim ** -0.5)
state['model']['decoder.embed_positions.weight'] = torch.cat([state['model']['decoder.embed_positions.weight'], expanded_decoder_pos], dim=0)
logger.info("Expanding the decoder positional embedding matrix to match the current shape...")
return state
def expand_embedding_matrix_v2(state, model, init="zero"):
"""
:param state: Checkpoint
:param model: self.model
    :param init: 'zero' or 'random' for initialization
:return:
"""
embed_dim = model.embed_tokens.weight.size(1)
if state['encoder.embed_tokens.weight'].size(0) < model.embed_tokens.weight.size(0):
offset = model.embed_tokens.weight.size(0) - state['encoder.embed_tokens.weight'].size(0)
if init == "zero":
expanded_embed = state['encoder.embed_tokens.weight'].new(offset, embed_dim).fill_(0)
elif init == "random":
expanded_embed = state['encoder.embed_tokens.weight'].new(offset, embed_dim)
torch.nn.init.normal_(expanded_embed, mean=0, std=embed_dim ** -0.5)
state['encoder.embed_tokens.weight'] = torch.cat([state['encoder.embed_tokens.weight'], expanded_embed], dim=0)
state['decoder.embed_tokens.weight'] = state['encoder.embed_tokens.weight']
state['decoder.output_projection.weight'] = state['encoder.embed_tokens.weight']
logger.info("Expanding the embedding matrix to match the current shape...")
if state['encoder.embed_positions.weight'].size(0) < model.embed_positions.weight.size(0):
offset = model.embed_positions.weight.size(0) - state['encoder.embed_positions.weight'].size(0)
expanded_encoder_pos = state['encoder.embed_positions.weight'].new(offset, embed_dim)
torch.nn.init.normal_(expanded_encoder_pos, mean=0, std=embed_dim ** -0.5)
state['encoder.embed_positions.weight'] = torch.cat([state['encoder.embed_positions.weight'], expanded_encoder_pos], dim=0)
logger.info("Expanding the encoder positional embedding matrix to match the current shape...")
if state['decoder.embed_positions.weight'].size(0) < model.embed_positions.weight.size(0):
offset = model.embed_positions.weight.size(0) - state['decoder.embed_positions.weight'].size(0)
expanded_decoder_pos = state['decoder.embed_positions.weight'].new(offset, embed_dim)
torch.nn.init.normal_(expanded_decoder_pos, mean=0, std=embed_dim ** -0.5)
state['decoder.embed_positions.weight'] = torch.cat([state['decoder.embed_positions.weight'], expanded_decoder_pos], dim=0)
logger.info("Expanding the decoder positional embedding matrix to match the current shape...")
return state
def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = float(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
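# Illustrative sketch (not part of the original module): checkpoint_paths sorts
# matches of the pattern's first group in descending order, which is what
# save_checkpoint relies on when deleting old checkpoints. The filenames below
# are made up.
def _example_checkpoint_paths():
    import tempfile
    with tempfile.TemporaryDirectory() as d:
        for name in ["checkpoint_3_3000.pt", "checkpoint_1_1000.pt", "checkpoint_2_2000.pt"]:
            open(os.path.join(d, name), "w").close()
        paths = checkpoint_paths(d, pattern=r"checkpoint_\d+_(\d+)\.pt")
        # the checkpoint with the largest update count comes first
        assert [os.path.basename(p) for p in paths] == [
            "checkpoint_3_3000.pt", "checkpoint_2_2000.pt", "checkpoint_1_1000.pt"
        ]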
def torch_persistent_save(obj, f):
if isinstance(f, str):
with PathManager.open(f, "wb") as h:
torch_persistent_save(obj, h)
return
for i in range(3):
try:
return torch.save(obj, f)
except Exception:
if i == 2:
logger.error(traceback.format_exc())
def save_state(
filename,
cfg: FairseqConfig,
model_state_dict,
criterion,
optimizer,
lr_scheduler,
num_updates,
optim_history=None,
extra_state=None,
**kwargs,
):
from fairseq import utils
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
"cfg": cfg,
"args": kwargs.get("args", None),
"model": model_state_dict or {},
"optimizer_history": optim_history
+ [
{
"criterion_name": criterion.__class__.__name__,
"optimizer_name": optimizer.__class__.__name__,
"lr_scheduler_state": lr_scheduler.state_dict(),
"num_updates": num_updates,
}
],
"extra_state": extra_state,
}
if utils.has_parameters(criterion):
state_dict["criterion"] = criterion.state_dict()
if cfg is None:
cfg = state_dict["args"]
assert cfg is not None, "must provide cfg or args"
if isinstance(cfg, DictConfig):
no_save_optimizer_state = cfg.checkpoint.no_save_optimizer_state
else:
no_save_optimizer_state = cfg.no_save_optimizer_state
if not no_save_optimizer_state:
state_dict["last_optimizer_state"] = optimizer.state_dict()
# keep everything on CPU
state_dict = utils.move_to_cpu(state_dict)
with PathManager.open(filename, "wb") as f:
torch_persistent_save(state_dict, f)
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
from fairseq import models, registry, tasks
# add optimizer_history
if "optimizer_history" not in state:
state["optimizer_history"] = [
{"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
]
state["last_optimizer_state"] = state["optimizer"]
del state["optimizer"]
del state["best_loss"]
# move extra_state into sub-dictionary
if "epoch" in state and "extra_state" not in state:
state["extra_state"] = {
"epoch": state["epoch"],
"batch_offset": state["batch_offset"],
"val_loss": state["val_loss"],
}
del state["epoch"]
del state["batch_offset"]
del state["val_loss"]
# reduce optimizer history's memory usage (only keep the last state)
if "optimizer" in state["optimizer_history"][-1] and state["optimizer_history"] is not None:
state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
for optim_hist in state["optimizer_history"]:
del optim_hist["optimizer"]
# record the optimizer class name
if state["optimizer_history"] is not None and "optimizer_name" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
# move best_loss into lr_scheduler_state
if state["optimizer_history"] is not None and "lr_scheduler_state" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["lr_scheduler_state"] = {
"best": state["optimizer_history"][-1]["best_loss"]
}
del state["optimizer_history"][-1]["best_loss"]
# keep track of number of updates
if state["optimizer_history"] is not None and "num_updates" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["num_updates"] = 0
# use stateful training data iterator
if state["extra_state"] is not None and "train_iterator" not in state["extra_state"]:
state["extra_state"]["train_iterator"] = {
"epoch": state["extra_state"]["epoch"],
"iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
}
# backward compatibility, cfg updates
if "args" in state and state["args"] is not None:
# default to translation task
if not hasattr(state["args"], "task"):
state["args"].task = "translation"
# --raw-text and --lazy-load are deprecated
if getattr(state["args"], "raw_text", False):
state["args"].dataset_impl = "raw"
elif getattr(state["args"], "lazy_load", False):
state["args"].dataset_impl = "lazy"
# epochs start at 1
if state["extra_state"]["train_iterator"] is not None:
state["extra_state"]["train_iterator"]["epoch"] = max(
state["extra_state"]["train_iterator"].get("epoch", 1), 1
)
# --remove-bpe ==> --postprocess
if hasattr(state["args"], "remove_bpe"):
state["args"].post_process = state["args"].remove_bpe
# --min-lr ==> --stop-min-lr
if hasattr(state["args"], "min_lr"):
state["args"].stop_min_lr = state["args"].min_lr
del state["args"].min_lr
# binary_cross_entropy => wav2vec criterion
if hasattr(state["args"], "criterion") and state["args"].criterion == "binary_cross_entropy":
state["args"].criterion = "wav2vec"
# speech_pretraining => audio pretraining
if hasattr(state["args"], "task") and state["args"].task == "speech_pretraining":
state["args"].task = "audio_pretraining"
# audio_cpc => wav2vec
if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc":
state["args"].arch = "wav2vec"
state["cfg"] = convert_namespace_to_omegaconf(state["args"])
if "cfg" in state and state["cfg"] is not None:
with open_dict(state["cfg"]):
if state["cfg"].task is not None:
# old model checkpoints may not have separate source/target positions
if hasattr(state["cfg"].task, "max_positions") and not hasattr(
state["cfg"].task, "max_source_positions"
):
state["cfg"].task.max_source_positions = state[
"cfg"
].task.max_positions
state["cfg"].task.max_target_positions = state[
"cfg"
].task.max_positions
return state
def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
"""Prune the given state_dict if desired for LayerDrop
(https://arxiv.org/abs/1909.11556).
Training with LayerDrop allows models to be robust to pruning at inference
time. This function prunes state_dict to allow smaller models to be loaded
from a larger model and re-maps the existing state_dict for this to occur.
It's called by functions that load models from checkpoints and does not
need to be called directly.
"""
arch = None
if model_cfg is not None:
arch = (
model_cfg._name
if isinstance(model_cfg, DictConfig)
else getattr(model_cfg, "arch", None)
)
if not model_cfg or arch is None or arch == "ptt_transformer":
# args should not be none, but don't crash if it is.
return state_dict
encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)
if not encoder_layers_to_keep and not decoder_layers_to_keep:
return state_dict
# apply pruning
logger.info(
"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
)
def create_pruning_pass(layers_to_keep, layer_name):
keep_layers = sorted(
int(layer_string) for layer_string in layers_to_keep.split(",")
)
mapping_dict = {}
for i in range(len(keep_layers)):
mapping_dict[str(keep_layers[i])] = str(i)
regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
return {"substitution_regex": regex, "mapping_dict": mapping_dict}
pruning_passes = []
if encoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
if decoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
new_state_dict = {}
for layer_name in state_dict.keys():
match = re.search(r"\.layers\.(\d+)\.", layer_name)
# if layer has no number in it, it is a supporting layer, such as an
# embedding
if not match:
new_state_dict[layer_name] = state_dict[layer_name]
continue
# otherwise, layer should be pruned.
original_layer_number = match.group(1)
# figure out which mapping dict to replace from
for pruning_pass in pruning_passes:
if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
"substitution_regex"
].search(layer_name):
new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
substitution_match = pruning_pass["substitution_regex"].search(
layer_name
)
new_state_key = (
layer_name[: substitution_match.start(1)]
+ new_layer_number
+ layer_name[substitution_match.end(1) :]
)
new_state_dict[new_state_key] = state_dict[layer_name]
# Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of a "make it work" fix than a proper fix.
if isinstance(model_cfg, DictConfig):
context = open_dict(model_cfg)
else:
context = contextlib.ExitStack()
with context:
if hasattr(model_cfg, "encoder_layers_to_keep"):
model_cfg.encoder_layers_to_keep = None
if hasattr(model_cfg, "decoder_layers_to_keep"):
model_cfg.decoder_layers_to_keep = None
return new_state_dict
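# Illustrative sketch (added for this document, not part of fairseq): shows how
# prune_state_dict renumbers the layers that are kept and drops the rest. The
# tiny state_dict and the Namespace-based config below are made-up stand-ins
# for a real model and its training args.
def _example_prune_state_dict():
    from argparse import Namespace
    state_dict = {
        "encoder.layers.0.fc1.weight": 0,
        "encoder.layers.1.fc1.weight": 1,
        "encoder.layers.2.fc1.weight": 2,
        "encoder.embed_tokens.weight": "emb",
    }
    cfg = Namespace(arch="transformer", encoder_layers_to_keep="0,2", decoder_layers_to_keep=None)
    pruned = prune_state_dict(state_dict, cfg)
    # layer 2 is renumbered to 1, layer 1 is dropped, non-layer keys are untouched
    assert pruned == {
        "encoder.layers.0.fc1.weight": 0,
        "encoder.layers.1.fc1.weight": 2,
        "encoder.embed_tokens.weight": "emb",
    }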
def load_pretrained_component_from_model(
component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
"""
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
provided `component` object. If state_dict fails to load, there may be a
mismatch in the architecture of the corresponding `component` found in the
`checkpoint` file.
"""
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = "encoder"
elif isinstance(component, FairseqDecoder):
component_type = "decoder"
else:
raise ValueError(
"component to load must be either a FairseqEncoder or "
"FairseqDecoder. Loading other component types are not supported."
)
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
# encoder.input_layers.0.0.weight --> input_layers.0.0.weight
component_subkey = key[len(component_type) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return component
def verify_checkpoint_directory(save_dir: str) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, "dummy")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
logger.warning(
"Unable to access checkpoint save directory: {}".format(save_dir)
)
raise e
else:
os.remove(temp_file_path)
| data2vec_vision-main | deltalm/src/fairseq/checkpoint_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from fairseq.modules.quantization import pq, quantization_options, scalar
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
def quantize_model_scalar(model, model_cfg: DictConfig):
quant_noise_scalar = getattr(model_cfg, "quant_noise_scalar", 0) or 0
if quant_noise_scalar > 0:
# quantize_model edits the model in place
scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000)
return model
class Quantizer(object):
def __init__(self, config_path, max_epoch, max_update):
try:
import yaml
except ImportError:
raise ImportError("Please install yaml with: pip install yaml")
# parse config
if config_path:
with open(config_path) as config_file:
config = quantization_options.parse_config_yaml(
yaml.safe_load(config_file)
)
else:
config = quantization_options.parse_config_yaml({})
self.n_centroids_config = config["n_centroids"]
self.block_sizes_config = config["block_sizes"]
self.layers_to_quantize = config["layers_to_quantize"]
# We assume that training will run for a fixed number of epochs
# (or updates) and that we should train for equal durations
# between iterations of PQ.
num_iterations = len(self.layers_to_quantize)
if max_epoch > 0:
assert max_epoch % num_iterations == 0, (
"for iterative PQ, --max-epoch (={}) must be evenly divisible by "
"len(layers_to_quantize) (={})".format(max_epoch, num_iterations)
)
self.epoch_schedule = max_epoch // num_iterations
else:
self.epoch_schedule = None
if max_update > 0:
assert max_update % num_iterations == 0, (
"for iterative PQ, --max-update (={}) must be evenly divisible by "
"len(layers_to_quantize) (={})".format(max_update, num_iterations)
)
self.update_schedule = max_update // num_iterations
else:
self.update_schedule = None
assert (self.epoch_schedule is not None) ^ (
self.update_schedule is not None
), "for iterative PQ, cannot specify both --max-update and --max-epoch"
# 0 is a special value for quantization step, which will force
# the first call to begin_epoch() to call step()
self.quantization_step = 0
def set_trainer(self, trainer):
self.trainer = trainer
self.size_tracker = pq.SizeTracker(self.trainer.get_model())
def step(self):
"""Move to the next stage of quantization."""
if self.quantization_step >= len(self.layers_to_quantize):
# Maybe we just finished the last training step or we loaded
# a checkpoint for an iterative PQ model which previously
# finished training. Either way, don't quantize again.
return
logger.info(
"quantizing model (step={}; layers_to_quantize[step]={})".format(
self.quantization_step, self.layers_to_quantize[self.quantization_step]
)
)
quantized_layers = pq.quantize_model_(
self.trainer.get_model(),
self.size_tracker,
self.layers_to_quantize,
self.block_sizes_config,
self.n_centroids_config,
step=self.quantization_step,
)
logger.info("quantized layers: {}".format(quantized_layers))
logger.info(self.size_tracker)
self.quantization_step += 1
        # reinitialize the Trainer since model parameters have changed
self.trainer.reinitialize()
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch (epochs start at 1)."""
if (
(
self.epoch_schedule is not None
and epoch > 0
and (epoch - 1) % self.epoch_schedule == 0
)
# we always step once in the beginning, even if using
# update-based quantization
or self.quantization_step == 0
):
self.step()
def step_update(self, num_updates):
"""Called at the end of each step."""
if (
self.update_schedule is not None
and num_updates > 0
and num_updates % self.update_schedule == 0
):
self.step()
def state_dict(self):
return {
"n_centroids_config": self.n_centroids_config,
"block_sizes_config": self.block_sizes_config,
"layers_to_quantize": self.layers_to_quantize,
"epoch_schedule": self.epoch_schedule,
"update_schedule": self.update_schedule,
"quantization_step": self.quantization_step,
}
def load_state_dict(self, state_dict):
self.n_centroids_config = state_dict["n_centroids_config"]
self.block_sizes_config = state_dict["block_sizes_config"]
self.layers_to_quantize = state_dict["layers_to_quantize"]
self.epoch_schedule = state_dict["epoch_schedule"]
self.update_schedule = state_dict["update_schedule"]
self.quantization_step = state_dict["quantization_step"]
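# Worked example (added for illustration, not part of the original module):
# with three entries in layers_to_quantize and --max-epoch 12, the Quantizer
# trains for 12 // 3 = 4 epochs between quantization steps, and begin_epoch()
# calls step() at epochs 1, 5 and 9 because (epoch - 1) % 4 == 0.
def _example_pq_epoch_schedule(max_epoch=12, num_iterations=3):
    assert max_epoch % num_iterations == 0
    epoch_schedule = max_epoch // num_iterations
    step_epochs = [e for e in range(1, max_epoch + 1) if (e - 1) % epoch_schedule == 0]
    return epoch_schedule, step_epochs  # (4, [1, 5, 9])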
| data2vec_vision-main | deltalm/src/fairseq/quantization_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import logging
import os
import pickle
import json
import random
import socket
import struct
import subprocess
import warnings
from argparse import Namespace
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional
import torch
import torch.distributed as dist
from fairseq import utils
from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig
from omegaconf import open_dict
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
# Flag to indicate if we're using Megatron
# NOTE: this is a temporary hack until we move away from Megatron's model parallel init
_USE_MEGATRON = False
# Whether to use XLA ops (e.g., on TPUs) instead of CUDA ops.
_USE_XLA = False
logger = logging.getLogger(__name__)
def is_master(cfg: DistributedTrainingConfig):
return cfg.distributed_rank == 0
def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False):
if cfg.distributed_init_method is not None or cfg.tpu:
return
if cfg.pipeline_model_parallel:
balance_exists = (
cfg.pipeline_balance is not None
or cfg.pipeline_encoder_balance is not None
or cfg.pipeline_decoder_balance is not None
)
devices_exist = (
cfg.pipeline_devices is not None
or cfg.pipeline_encoder_devices is not None
or cfg.pipeline_decoder_devices is not None
)
if not balance_exists:
raise ValueError(
"--pipeline-balance is currently required for pipeline model parallelism"
)
if not devices_exist:
raise ValueError(
"--pipeline-devices is currently required for pipeline model parallelism"
)
cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int)
if cfg.pipeline_devices is not None:
cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int)
num_pipeline_devices = len(set(cfg.pipeline_devices))
else:
cfg.pipeline_encoder_devices = utils.eval_str_list(
cfg.pipeline_encoder_devices, type=int
)
cfg.pipeline_decoder_devices = utils.eval_str_list(
cfg.pipeline_decoder_devices, type=int
)
num_pipeline_devices = len(
set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices)
)
gpus_per_node = torch.cuda.device_count()
assert (
gpus_per_node >= num_pipeline_devices
and gpus_per_node % num_pipeline_devices == 0
), (
"the number of unique device IDs in --pipeline-devices must evenly divide "
"the number of GPUs per node (multi-node pipelining is not yet supported)"
)
num_pipelines_per_node = gpus_per_node // num_pipeline_devices
# support torch.distributed.launch
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
cfg.distributed_init_method = "env://"
cfg.distributed_world_size = int(os.environ["WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
elif all(
key in os.environ
for key in ["PHILLY_RUNTIME_CONFIG", "OMPI_COMM_WORLD_SIZE", "OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_LOCAL_RANK"]
):
runtime_config_file = os.environ.get("PHILLY_RUNTIME_CONFIG")
with open(runtime_config_file) as _f:
runtime_config = json.load(_f)
for name, config in runtime_config["containers"].items():
if config["index"] == 0:
master_config = config
master_ip = master_config["ip"]
master_port = master_config["portRangeStart"] + 1
cfg.distributed_init_method = f"tcp://{master_ip}:{master_port}"
cfg.distributed_world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
cfg.device_id = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
elif all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "OMPI_COMM_WORLD_SIZE", "OMPI_COMM_WORLD_RANK"]
):
cfg.distributed_init_method = "env://"
cfg.distributed_world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
# we can determine the init method automatically for Slurm
elif cfg.distributed_port > 0:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
cfg.distributed_init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=cfg.distributed_port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get("SLURM_NODEID"))
cfg.distributed_rank = node_id * gpus_per_node
cfg.distributed_world_size = nnodes * gpus_per_node
elif cfg.pipeline_model_parallel:
assert ntasks_per_node == num_pipelines_per_node, (
"SLURM --ntasks-per-node must match number of pipelines per "
"node (={})".format(num_pipelines_per_node)
)
cfg.distributed_no_spawn = True
                    # For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on
                    # the first node, [2, 3] on the second node, etc. This
                    # matches torch.distributed.launch.
node_id = int(os.environ.get("SLURM_NODEID"))
local_id = int(os.environ.get("SLURM_LOCALID"))
cfg.distributed_rank = node_id * num_pipelines_per_node + local_id
# In the above example, device_id will always be in [0, 1],
# which also matches torch.distributed.launch.
cfg.device_id = local_id
# We also want to set distributed_world_size to be the total
# number of pipelines across all nodes.
cfg.distributed_world_size = nnodes * num_pipelines_per_node
else:
assert ntasks_per_node == cfg.distributed_world_size // nnodes
cfg.distributed_no_spawn = True
cfg.distributed_rank = int(os.environ.get("SLURM_PROCID"))
cfg.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
elif cfg.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
assert (
cfg.distributed_world_size <= torch.cuda.device_count()
), f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices"
port = random.randint(10000, 20000)
cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port)
if cfg.pipeline_model_parallel:
if not cfg.distributed_no_spawn:
# When distributed_no_spawn is False, we expect distributed_rank and
# distributed_world_size to be based on the total number of GPUs, so
# we need to correct them to be based on the number of pipelines.
assert cfg.distributed_world_size % num_pipeline_devices == 0
cfg.distributed_world_size = (
cfg.distributed_world_size // num_pipeline_devices
)
# In the case of 4-way MP on nodes with 8 GPUs, we want
# distributed_rank to be the starting GPU index for each pipeline
# i.e., 0, 2, ...
assert cfg.distributed_rank % gpus_per_node == 0
assert cfg.distributed_rank % num_pipeline_devices == 0
with open_dict(cfg):
cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices
# launch one process per pipeline
cfg.distributed_num_procs = num_pipelines_per_node
# if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0
# and 4, indicating the starting device IDs for each pipeline
cfg.device_id *= num_pipeline_devices
if cfg.device_id > 0:
# if there's multiple pipelines on a node (e.g., 4-way MP on an 8
# GPU node), we need to adjust pipeline_devices accordingly
logger.debug(
"setting CUDA device={} on rank {}".format(
cfg.device_id, cfg.distributed_rank
)
)
torch.cuda.set_device(cfg.device_id)
with open_dict(cfg):
cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices]
logger.info(
"setting pipeline_devices={} on rank {}".format(
cfg.pipeline_devices, cfg.distributed_rank
)
)
elif not cfg.distributed_no_spawn:
with open_dict(cfg):
cfg.distributed_num_procs = min(
torch.cuda.device_count(), cfg.distributed_world_size
)
def distributed_init(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
cfg = convert_namespace_to_omegaconf(cfg)
if not cfg.common.tpu:
if torch.distributed.is_available() and torch.distributed.is_initialized():
warnings.warn(
"Distributed is already initialized, cannot initialize twice!"
)
else:
logger.info(
"distributed init (rank {}): {}".format(
cfg.distributed_training.distributed_rank,
cfg.distributed_training.distributed_init_method,
)
)
dist.init_process_group(
backend=cfg.distributed_training.distributed_backend,
init_method=cfg.distributed_training.distributed_init_method,
world_size=cfg.distributed_training.distributed_world_size,
rank=cfg.distributed_training.distributed_rank,
)
logger.info(
"initialized host {} as rank {}".format(
socket.gethostname(),
cfg.distributed_training.distributed_rank,
)
)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda())
cfg.distributed_training.distributed_rank = torch.distributed.get_rank()
else:
assert xm.xrt_world_size() == cfg.distributed_training.distributed_world_size
global _USE_XLA
_USE_XLA = True
cfg.distributed_training.device_id = xm.get_local_ordinal()
cfg.distributed_training.distributed_rank = xm.get_ordinal()
xm.rendezvous("distributed_init") # wait for all workers
xm.mark_step()
if is_master(cfg.distributed_training):
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
if cfg.common.model_parallel_size > 1:
try:
from fairseq.model_parallel.megatron.mpu import (
initialize_model_parallel,
model_parallel_cuda_manual_seed,
)
except ImportError:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
global _USE_MEGATRON
_USE_MEGATRON = True
initialize_model_parallel(cfg.common.model_parallel_size)
model_parallel_cuda_manual_seed(cfg.common.seed)
model_part_number = get_model_parallel_rank()
cfg.checkpoint.checkpoint_suffix += "-model_part-{0}".format(model_part_number)
return cfg.distributed_training.distributed_rank
def distributed_main(i, main, cfg: FairseqConfig, kwargs):
cfg.distributed_training.device_id = i
if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_rank is None: # torch.multiprocessing.spawn
cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i
cfg.distributed_training.distributed_rank = distributed_init(cfg)
after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None)
if after_distributed_init_fn:
cfg = after_distributed_init_fn(cfg)
main(cfg, **kwargs)
def call_main(cfg: FairseqConfig, main, **kwargs):
if cfg.distributed_training.distributed_init_method is None:
infer_init_method(cfg.distributed_training)
if cfg.distributed_training.distributed_init_method is not None:
# distributed training
if not cfg.distributed_training.distributed_no_spawn:
start_rank = cfg.distributed_training.distributed_rank
cfg.distributed_training.distributed_rank = None # assign automatically
kwargs["start_rank"] = start_rank
torch.multiprocessing.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
nprocs=min(
torch.cuda.device_count(),
cfg.distributed_training.distributed_world_size,
),
)
else:
distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs)
elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1:
import torch_xla.distributed.xla_multiprocessing as xmp
torch.multiprocessing.set_sharing_strategy("file_system")
xmp.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
nprocs=8, # use all 8 TPU cores
)
else:
# single GPU main
torch.cuda.set_device(cfg.distributed_training.device_id)
main(cfg, **kwargs)
def use_xla():
global _USE_XLA
return _USE_XLA
def new_groups(grouped_ranks: List[List[int]]):
if use_xla():
return ("tpu", grouped_ranks)
else:
groups = [dist.new_group(g) for g in grouped_ranks]
my_group_idx = _find_my_group_index(grouped_ranks)
return groups[my_group_idx]
def _find_my_group_index(grouped_ranks):
my_rank = get_global_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def _find_my_group(grouped_ranks):
index = _find_my_group_index(grouped_ranks)
return grouped_ranks[index]
def get_rank(group):
if use_xla():
assert group[0] == "tpu"
my_group = _find_my_group(group[1])
return my_group.index(get_global_rank())
else:
return dist.get_rank(group=group)
def get_world_size(group):
if use_xla():
assert group[0] == "tpu"
my_group = _find_my_group(group[1])
return len(my_group)
elif torch.distributed.is_initialized():
return dist.get_world_size(group=group)
else:
return 1
def get_global_group():
if use_xla():
return new_groups([list(range(get_global_world_size()))])
elif torch.distributed.is_initialized():
if not hasattr(get_global_group, "_global_group"):
# ideally we could use torch.distributed.group.WORLD, but it seems
# to cause random NCCL hangs in some cases
get_global_group._global_group = dist.new_group()
return get_global_group._global_group
else:
return None
def get_global_rank():
if use_xla():
return xm.get_ordinal()
elif torch.distributed.is_initialized():
return torch.distributed.get_rank()
else:
return 0
def get_global_world_size():
if use_xla():
return xm.xrt_world_size()
elif torch.distributed.is_initialized():
return torch.distributed.get_world_size()
else:
return 1
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
global _USE_MEGATRON
if _USE_MEGATRON:
from fairseq.model_parallel.megatron import mpu
return mpu.get_data_parallel_group()
else:
return get_global_group()
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return get_rank(get_data_parallel_group())
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return get_world_size(get_data_parallel_group())
def get_model_parallel_group():
global _USE_MEGATRON
if _USE_MEGATRON:
from fairseq.model_parallel.megatron import mpu
return mpu.get_model_parallel_group()
else:
return None
def get_model_parallel_rank():
"""Return my rank for the model parallel group."""
return get_rank(get_model_parallel_group())
def get_model_parallel_world_size():
"""Return world size for the model parallel group."""
return get_world_size(get_model_parallel_group())
def all_reduce(tensor, group, op="sum"):
if use_xla():
assert isinstance(group, tuple) and group[0] == "tpu"
tensor = [tensor] # wrap in a list to make xm.all_reduce in-place
return xm.all_reduce(op, tensor, groups=group[1])[0]
else:
if op == "sum":
op = dist.ReduceOp.SUM
elif op == "max":
op = dist.ReduceOp.MAX
else:
raise NotImplementedError
dist.all_reduce(tensor, op=op, group=group)
return tensor
def broadcast(tensor, src, group):
if use_xla():
# XLA doesn't support broadcast, hack it with all_reduce
if get_rank(group) != src:
tensor.zero_()
all_reduce(tensor, group)
else:
dist.broadcast(tensor, src=src, group=group)
def all_to_all(tensor, group):
"""Perform an all-to-all operation on a 1D Tensor."""
assert tensor.dim() == 1
split_count = get_world_size(group=group)
assert tensor.numel() % split_count == 0
if use_xla():
assert isinstance(group, tuple) and group[0] == "tpu"
return xm.all_to_all(
tensor,
split_dimension=0,
concat_dimension=0,
split_count=split_count,
groups=group[1],
)
else:
output = torch.zeros_like(tensor)
dist.all_to_all_single(output, tensor, group=group)
return output
def all_gather(tensor, group, return_tensor=False):
"""Perform an all-gather operation."""
if use_xla():
result = xm.all_gather(tensor, groups=group[1])
world_size = get_world_size(group=group)
result = result.view(world_size, *tensor.size())
if return_tensor:
return result
else:
return [result[i] for i in range(world_size)]
else:
world_size = get_world_size(group=group)
rank = get_rank(group=group)
tensor_list = [
tensor if i == rank else torch.empty_like(tensor) for i in range(world_size)
]
dist.all_gather(tensor_list, tensor, group=group)
if return_tensor:
return torch.stack(tensor_list, dim=0)
else:
return tensor_list
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable.
Args:
data (Any): data from the local worker to be gathered on other workers
group: group of the collective
max_size (int, optional): maximum size of the data to be gathered
across workers
"""
if group is None:
group = get_global_group()
rank = get_rank(group=group)
world_size = get_world_size(group=group)
buffer_size = max_size * world_size
if (
not hasattr(all_gather_list, "_buffer")
or all_gather_list._buffer.numel() < buffer_size
):
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
data = utils.move_to_cpu(data)
enc = pickle.dumps(data)
enc_size = len(enc)
header_size = 4 # size of header that contains the length of the encoded data
size = header_size + enc_size
if size > max_size:
raise ValueError(
"encoded data size ({}) exceeds max_size ({})".format(size, max_size)
)
header = struct.pack(">I", enc_size)
cpu_buffer[:size] = torch.ByteTensor(list(header + enc))
start = rank * max_size
buffer[start : start + size].copy_(cpu_buffer[:size])
all_reduce(buffer, group=group)
buffer = buffer.cpu()
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size : (i + 1) * max_size]
(enc_size,) = struct.unpack(">I", bytes(out_buffer[:header_size].tolist()))
if enc_size > 0:
result.append(
pickle.loads(
bytes(out_buffer[header_size : header_size + enc_size].tolist())
)
)
return result
except pickle.UnpicklingError:
raise Exception(
"Unable to unpickle data from other workers. all_gather_list requires all "
"workers to enter the function together, so this error usually indicates "
"that the workers have fallen out of sync somehow. Workers can fall out of "
"sync if one of them runs out of memory, or if there are other conditions "
"in your training script that can cause one worker to finish an epoch "
"while other workers are still iterating over their portions of the data. "
"Try rerunning with --ddp-backend=no_c10d and see if that helps."
)
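# Illustrative sketch (not part of the original module): all_gather_list frames
# each worker's pickled payload with a 4-byte big-endian length header before
# summing the per-rank byte buffers. This standalone round trip shows the
# framing without needing an initialized process group.
def _example_all_gather_list_framing():
    data = {"ntokens": 1024, "loss": 2.5}
    enc = pickle.dumps(data)
    framed = struct.pack(">I", len(enc)) + enc
    (enc_size,) = struct.unpack(">I", framed[:4])
    assert pickle.loads(framed[4 : 4 + enc_size]) == data
    return enc_size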
def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]:
"""
AllReduce a dictionary of values across workers. We separately
reduce items that are already on the device and items on CPU for
better performance.
Args:
data (Mapping[str, Any]): dictionary of data to all-reduce, but
cannot be a nested dictionary
device (torch.device): device for the reduction
group: group of the collective
"""
data_keys = list(data.keys())
# We want to separately reduce items that are already on the
# device and items on CPU for performance reasons.
cpu_data = OrderedDict()
device_data = OrderedDict()
for k in data_keys:
t = data[k]
if not torch.is_tensor(t):
cpu_data[k] = torch.tensor(t, dtype=torch.double)
elif t.device.type != device.type:
cpu_data[k] = t.to(dtype=torch.double)
else:
device_data[k] = t.to(dtype=torch.double)
def _all_reduce_dict(data: OrderedDict):
if len(data) == 0:
return data
buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device)
all_reduce(buf, group=group)
split_buf = torch.split(buf, [t.numel() for t in data.values()])
reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())]
return OrderedDict(zip(data.keys(), reduced_data))
cpu_data = _all_reduce_dict(cpu_data)
device_data = _all_reduce_dict(device_data)
def get_from_stack(key):
if key in cpu_data:
return cpu_data[key]
elif key in device_data:
return device_data[key]
raise KeyError
return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
def broadcast_tensors(
tensors: Optional[List[torch.Tensor]],
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
"""
Broadcasts a list of tensors without other (non-src) ranks needing to know
the dtypes/shapes of the tensors.
"""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
# share metadata first to simplify transfer
is_src_rank = (get_rank(group) == src_rank)
if is_src_rank:
metadata = [
{"size": t.size(), "dtype": t.dtype, "device": t.device} for t in tensors
]
metadata = _broadcast_object_slow(metadata, src_rank, group, dist_device)
else:
metadata = _broadcast_object_slow(None, src_rank, group, dist_device)
out_tensors = []
for i, meta in enumerate(metadata):
if is_src_rank:
tensor = tensors[i]
broadcast(tensors[i].to(dist_device), src=src_rank, group=group)
else:
tensor = torch.zeros(
[meta["size"].numel()], dtype=meta["dtype"], device=dist_device
)
broadcast(tensor, src=src_rank, group=group)
tensor = tensor.view(meta["size"]).to(meta["device"])
out_tensors.append(tensor)
return out_tensors
def broadcast_object(
obj: Any,
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> Any:
"""Broadcast an arbitrary Python object to other workers."""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
if get_rank(group) == src_rank:
# split the tensors from the non-tensors so we can broadcast them
# directly, avoiding unnecessary serialization/deserialization
tensors = []
obj = _split_tensors_from_obj(obj, tensors)
obj = _broadcast_object_slow(obj, src_rank, group, dist_device)
tensors = broadcast_tensors(tensors, src_rank, group, dist_device)
else:
obj = _broadcast_object_slow(None, src_rank, group, dist_device)
tensors = broadcast_tensors(None, src_rank, group, dist_device)
return _put_tensors_in_obj(obj, tensors)
def _broadcast_object_slow(
obj: Any, src_rank: int, group: object, dist_device: torch.device,
) -> Any:
if get_rank(group) == src_rank:
# Emit data
buffer = io.BytesIO()
torch.save(obj, buffer)
buffer = torch.ByteTensor(buffer.getbuffer()).to(dist_device)
length = torch.LongTensor([len(buffer)]).to(dist_device)
broadcast(length, src=src_rank, group=group)
broadcast(buffer, src=src_rank, group=group)
else:
# Fetch from the source
length = torch.LongTensor([0]).to(dist_device)
broadcast(length, src=src_rank, group=group)
buffer = torch.ByteTensor(int(length.item())).to(dist_device)
broadcast(buffer, src=src_rank, group=group)
buffer = io.BytesIO(buffer.cpu().numpy())
obj = torch.load(buffer, map_location="cpu")
return obj
@dataclass(frozen=True)
class _TensorPlaceholder:
index: int
def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if torch.is_tensor(obj):
placeholder = _TensorPlaceholder(index=len(tensors))
tensors.append(obj)
return placeholder
elif isinstance(obj, dict):
return {k: _split_tensors_from_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_split_tensors_from_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_split_tensors_from_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_split_tensors_from_obj(v, tensors) for v in obj}
else:
return obj
def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if isinstance(obj, _TensorPlaceholder):
return tensors[obj.index]
elif isinstance(obj, dict):
return {k: _put_tensors_in_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_put_tensors_in_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_put_tensors_in_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_put_tensors_in_obj(v, tensors) for v in obj}
else:
return obj
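# Illustrative sketch (added for this document): broadcast_object strips tensors
# out of an arbitrary Python object so they can be broadcast directly, then puts
# them back on the receiving side. The round trip below runs in a single process
# with a made-up nested object.
def _example_tensor_placeholder_roundtrip():
    obj = {"step": 7, "weights": [torch.ones(2), torch.zeros(3)]}
    tensors = []
    stripped = _split_tensors_from_obj(obj, tensors)
    assert len(tensors) == 2 and isinstance(stripped["weights"][0], _TensorPlaceholder)
    restored = _put_tensors_in_obj(stripped, tensors)
    assert torch.equal(restored["weights"][1], torch.zeros(3))
    return restored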
| data2vec_vision-main | deltalm/src/fairseq/distributed_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
import json
import logging
import os
import shutil
import tarfile
import tempfile
from functools import partial, wraps
from hashlib import sha256
from io import open
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path))
except (AttributeError, ImportError):
PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def load_archive_file(archive_file):
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=None)
except EnvironmentError:
logger.info(
"Archive name '{}' was not found in archive name list. "
"We assumed '{}' was a path or URL but couldn't find any file "
"associated to this path or URL.".format(
archive_file,
archive_file,
)
)
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info(
"loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file
)
)
# Extract archive to temp dir and replace .tar.bz2 if necessary
tempdir = None
if not os.path.isdir(resolved_archive_file):
tempdir = tempfile.mkdtemp()
logger.info(
"extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir
)
)
ext = os.path.splitext(archive_file)[1][1:]
with tarfile.open(resolved_archive_file, "r:" + ext) as archive:
top_dir = os.path.commonprefix(archive.getnames())
archive.extractall(tempdir)
os.remove(resolved_archive_file)
shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
shutil.rmtree(tempdir)
return resolved_archive_file
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the URL's, delimited
by a period.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
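# Illustrative sketch (not part of the original module): the cache filename is
# the SHA-256 hex digest of the URL, optionally suffixed with the digest of the
# ETag, so repeated downloads of the same resource map to the same cache entry.
# The URL and ETag below are made-up examples.
def _example_url_to_filename():
    url = "https://example.com/model.tar.gz"
    assert url_to_filename(url) == sha256(url.encode("utf-8")).hexdigest()
    with_etag = url_to_filename(url, etag='"abc123"')
    assert with_etag.startswith(url_to_filename(url) + ".")
    return with_etag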
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
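# Illustrative sketch (added for exposition, not part of the original module):
# for an existing local path cached_path is a pass-through, while an
# unsupported scheme raises ValueError; no network access is involved here.
def _example_cached_path_local():
    assert cached_path(__file__) == __file__
    try:
        cached_path("ftp://example.com/file.bin")
    except ValueError:
        return True
    return False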
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
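# Illustrative sketch (added for exposition, not part of the original module):
# the bucket and key below are made-up example values.
def _example_split_s3_path():
    bucket, key = split_s3_path("s3://my-bucket/models/checkpoint.pt")
    assert (bucket, key) == ("my-bucket", "models/checkpoint.pt")
    return bucket, key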
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
from botocore.exceptions import ClientError
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def request_wrap_timeout(func, url):
import requests
for attempt, timeout in enumerate([10, 20, 40, 60, 60]):
try:
return func(timeout=timeout)
except requests.exceptions.Timeout as e:
logger.warning(
"Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs",
url,
attempt,
timeout,
exc_info=e,
)
continue
raise RuntimeError(f"Unable to fetch file {url}")
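# Illustrative sketch (added for exposition, not part of the original module):
# request_wrap_timeout retries a callable that raises requests.exceptions.Timeout,
# growing the timeout on each attempt. The _flaky helper and the URL are made up;
# requests must be installed, but no real HTTP request is made here.
def _example_request_wrap_timeout():
    import requests
    attempts = []
    def _flaky(timeout):
        attempts.append(timeout)
        if len(attempts) < 3:
            raise requests.exceptions.Timeout()
        return "ok"
    result = request_wrap_timeout(_flaky, "https://example.com/file.bin")
    assert result == "ok" and attempts == [10, 20, 40]
    return attempts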
def http_get(url, temp_file):
import requests
from tqdm import tqdm
req = request_wrap_timeout(partial(requests.get, url, stream=True), url)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
import requests
response = request_wrap_timeout(
partial(requests.head, url, allow_redirects=True), url
)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except RuntimeError:
etag = None
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
output_string = json.dumps(meta)
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r", encoding="utf-8") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
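# Illustrative sketch (added for exposition, not part of the original module):
# get_file_extension controls the leading dot and lowercasing via its flags.
def _example_get_file_extension():
    assert get_file_extension("data/Corpus.TXT") == ".txt"
    assert get_file_extension("data/Corpus.TXT", dot=False) == "txt"
    assert get_file_extension("data/Corpus.TXT", lower=False) == ".TXT"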
| data2vec_vision-main | deltalm/src/fairseq/file_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Optional
import torch
import torch.nn as nn
from fairseq.token_generation_constraints import (
ConstraintState,
OrderedConstraintState,
UnorderedConstraintState,
)
from torch import Tensor
class Search(nn.Module):
def __init__(self, tgt_dict):
super().__init__()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
self.stop_on_max_len = True
def step(
self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
prev_output_tokens: (bsz x step)
                the previously generated output tokens
original_batch_idxs: (bsz)
the tensor with the batch indices, in the range [0, bsz)
                this is useful when a re-ordering has been applied
                and we need to know the original indices
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
        Returns:
            nothing; subclasses that support constraints initialize their
            per-sentence constraint states here
"""
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.constraint_states = None
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
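# Illustrative sketch (added for exposition, not part of the original module):
# exercising BeamSearch.step at step 0 with random log-probabilities. The
# _ToyDict class is a stand-in for a fairseq Dictionary, made up for this
# example; real callers pass the task's target dictionary. Call it manually,
# nothing in the module invokes it.
def _example_beam_search_step():
    class _ToyDict:
        def pad(self):
            return 1
        def unk(self):
            return 3
        def eos(self):
            return 2
        def __len__(self):
            return 100
    bsz, beam_size, vocab_size = 2, 5, 100
    search = BeamSearch(_ToyDict())
    lprobs = torch.randn(bsz, beam_size, vocab_size).log_softmax(dim=-1)
    scores, indices, beams = search.step(0, lprobs, scores=None)
    # at step 0 the search returns the top 2 * beam_size candidates per sentence
    assert scores.shape == indices.shape == beams.shape == (bsz, 2 * beam_size)
    return scores, indices, beams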
class PrefixConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, prefix_allowed_tokens_fn):
super().__init__(tgt_dict)
self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
self.stop_on_max_len = True
@torch.jit.export
def apply_mask(self, x, prev_output_tokens, original_batch_idxs):
beam_size = x.shape[0] // original_batch_idxs.shape[0]
original_batch_idxs = (
original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist()
)
mask = torch.full_like(x, -math.inf)
for sent_i, (sent, batch_i) in enumerate(
zip(prev_output_tokens, original_batch_idxs)
):
mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0
return mask
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Tensor,
prev_output_tokens: Tensor,
original_batch_idxs: Tensor,
):
bsz, beam_size, vocab_size = lprobs.size()
lprobs += self.apply_mask(
lprobs.view(bsz * beam_size, 1, vocab_size),
prev_output_tokens,
original_batch_idxs,
).view(bsz, beam_size, vocab_size)
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
return scores_buf, indices_buf, beams_buf
class LexicallyConstrainedBeamSearch(Search):
"""Implements lexically constrained beam search as described in
Fast Lexically Constrained Decoding with Dynamic Beam
Allocation for Neural Machine Translation. Post & Vilar,
NAACL 2018. https://www.aclweb.org/anthology/N18-1119/
and
Improved Lexically Constrained Decoding for Translation and
Monolingual Rewriting. Hu et al, NAACL
2019. https://www.aclweb.org/anthology/N19-1090/
This is accomplished by maintaining, for each beam hypothesis, a
ConstraintState object (see constraints.py) that tracks which
constraints have been generated and using this information to
shape the beam for each input sentence.
"""
def __init__(self, tgt_dict, representation):
super().__init__(tgt_dict)
self.representation = representation
self.vocab_size = len(tgt_dict)
self.num_cands = 0
self.supports_constraints = True
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
self.constraint_states = []
for constraint_tensor in batch_constraints:
if self.representation == "ordered":
constraint_state = OrderedConstraintState.create(constraint_tensor)
elif self.representation == "unordered":
constraint_state = UnorderedConstraintState.create(constraint_tensor)
self.constraint_states.append([constraint_state for i in range(beam_size)])
@torch.jit.export
def prune_sentences(self, batch_idxs: Tensor):
self.constraint_states = [
self.constraint_states[i] for i in batch_idxs.tolist()
]
@torch.jit.export
def update_constraints(self, active_hypos: Tensor):
if self.constraint_states:
batch_size = active_hypos.size(0)
for sentid in range(batch_size):
self.constraint_states[sentid] = [
self.constraint_states[sentid][i] for i in active_hypos[sentid]
]
@torch.jit.export
def step(
self,
step: int,
lprobs: Tensor,
scores: Optional[Tensor],
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
"""
A constrained step builds a large candidates list from the following:
- the top 2 * {beam_size} items over the whole beam
- for each item in the beam
- the top {each_k} (default 1)
- all next constraints
We then compute the constrained state of each beam item, and assign
stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so
on. We then sort by (stripe, score), and truncate the list at
2 * beam size.
Args:
step: the decoder step
lprobs: (batch size, beam size, target vocab)
the target-vocab distributions for each item in the beam.
        Return: A tuple of (scores, indices, beams, constraints) where:
scores: (batch, output beam size)
the scores of the chosen elements
indices: (batch, output beam size)
the target vocab indices of the chosen elements
beams: (batch, output beam size)
the 0-indexed hypothesis ids of the chosen elements
constraints: (batch, output beam size)
the new constraint states
"""
each_k = 1
device = lprobs.device
batch_size, beam_size, vocab_size = lprobs.size()
self.num_cands = min(
# Just take the k-best. We'll get another k from the 1-best from each
# row, plus more from the constraints
beam_size * 2,
lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad
)
# STEP 0: Preliminary. Prevent EOS for unfinished hyps across all batch items
constraint_states = self.constraint_states
if constraint_states and step > 0:
not_finished_indices = []
for sentno, sent_constraints in enumerate(constraint_states):
for beamno, state in enumerate(sent_constraints):
index = sentno * beam_size + beamno
if not state.finished:
not_finished_indices.append(index)
not_finished_indices = torch.tensor(not_finished_indices)
if not_finished_indices.numel() > 0:
lprobs.view(batch_size * beam_size, -1)[
not_finished_indices, self.eos
] = -math.inf
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam entry for each batch item
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(batch_size, -1),
self.num_cands,
)
scores_buf, indices_buf = top_prediction
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# Short circuit if there are no constraints in this batch
if not constraint_states:
return scores_buf, indices_buf, beams_buf
# STEP 1: get top-1 from each hypothesis across all sentences in the batch
if step > 0:
top_scores, top_indices = torch.topk(
lprobs.view(batch_size * beam_size, -1),
k=each_k,
dim=1,
)
top_scores = top_scores.view(batch_size, -1)
top_indices = top_indices.view(batch_size, -1)
scores_buf = torch.cat((scores_buf, top_scores), dim=1)
indices_buf = torch.cat((indices_buf, top_indices), dim=1)
new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1)
beams_buf = torch.cat((beams_buf, new_beams), dim=1)
# Now, process sentences in the batch one by one.
new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device)
new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
for sentno, states in enumerate(constraint_states):
scores, indices, beams, new_states = self.step_sentence(
step,
sentno,
lprobs[sentno],
constraint_states[sentno],
beams_buf[sentno].clone(),
indices_buf[sentno].clone(),
scores_buf[sentno].clone(),
)
new_scores_buf[sentno] = scores
new_indices_buf[sentno] = indices
new_beams_buf[sentno] = beams
self.constraint_states[sentno] = new_states
return new_scores_buf, new_indices_buf, new_beams_buf
@torch.jit.export
def step_sentence(
self,
step: int,
sentno: int,
lprobs: Tensor,
constraint_states: List[List[ConstraintState]],
beams_buf: Tensor,
indices_buf: Tensor,
scores_buf: Tensor,
):
"""Does per-sentence processing. Adds all constraints for each
hypothesis to the list of candidates; then removes duplicates,
sorts, and dynamically stripes across the banks. All tensor inputs
are collapsed to those pertaining to a single input sentence.
"""
device = lprobs.device
# STEP 2: Add all constraints for each beam item
for beamno, state in enumerate(constraint_states):
next_tokens = torch.tensor(list(state.next_tokens()), device=device).long()
if next_tokens.numel() != 0:
indices_buf = torch.cat((indices_buf, next_tokens))
next_beams = (
torch.tensor(beamno, device=device)
.repeat(next_tokens.size(0))
.long()
)
beams_buf = torch.cat((beams_buf, next_beams))
next_values = lprobs[beamno].take(next_tokens.view(-1))
scores_buf = torch.cat((scores_buf, next_values))
# At the 0th time step, there is just one beam item
if step == 0:
break
# STEP 3: Compute the "bank" for each candidate. This is the
# number of constraints it's generated. We need this so that
# we can do round-robin allocation of the beam across these
# banks. If C is the number of constraints, we select the best
# item in bank C, then the best in bank C-1, etc, followed by
# the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so
# on, until the maximum beam size. We accomplish this by
# creating a sort key and striping across the banks.
# Compute the new states for all candidates
cands_size = indices_buf.size(0)
constraint_states = [
constraint_states[beams_buf[i]].advance(indices_buf[i])
for i in range(cands_size)
]
banks = torch.tensor([state.bank for state in constraint_states], device=device)
# STEP 4: Sort
num_constraint_tokens = len(state.tokens)
# Sort by keys (bank, score) (i.e., sort banks together, and scores
# within banks). AFAIK pytorch doesn't support either stable sort or
# multi-key sorting, so we have to hack this.
MAX_SCORE = -100
sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf
sort_values, sort_indices = sort_key.sort(dim=0, descending=True)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
banks = banks[sort_indices]
# Sort the constraints to follow suit
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 5: Remove duplicates. The topk calls (overall and
# per-row) plus the per-row generation of constraints will
# produce duplicates. Here we remove them.
def roll(t):
"""Rolls a 1d tensor left by 1.
[0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3]
"""
return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0)
# We map candidates (beam, token_id) to a single dimension.
# This is then shifted by 1. We can then easily identify
# duplicates and create a mask that identifies unique
# extensions.
uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf
uniques_mask = roll(uniques_mask) != uniques_mask
# Use the mask to pare down the data structures
scores_buf = torch.masked_select(scores_buf, uniques_mask)
indices_buf = torch.masked_select(indices_buf, uniques_mask)
beams_buf = torch.masked_select(beams_buf, uniques_mask)
banks = torch.masked_select(banks, uniques_mask)
i = 1
for mask in uniques_mask[1:]:
if not mask:
constraint_states.pop(i)
i += mask
# STEP 6: Assign IDs round-robin across banks, sort, and
# truncate. Now that the candidates are sorted by (bank,
# score) and uniqed, we dynamically allocate the {beam_size}
# beam by striping across the candidates. These stripes will
# be used as sort keys to do round-robin selection. This is
# accomplished in a single pass with offsets. Sorting by
# highest-banks (furthest-along hypotheses) first ensures
# progress through the constraints.
#
# e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0
# OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1
# NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7
# = 0 5 10 1 6 11 13 2 7 12 3 8
#
# Sorting by this then gives the following banks:
#
# 3 2 1 0 3 2 1 0 3 2 1 2
#
# We'll take the top {beam_size} of these.
stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
stripes = torch.zeros_like(banks)
cur_bank_count = -1
cur_bank = banks[0]
for i, bank in enumerate(banks):
if bank != cur_bank:
cur_bank_count = 0
cur_bank = bank
else:
cur_bank_count += 1
stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
# STEP 7: Sort by the stripes values
sort_values, sort_indices = stripes.sort(dim=0)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 8: Truncate to the candidates size!
scores_buf = scores_buf[: self.num_cands]
indices_buf = indices_buf[: self.num_cands]
beams_buf = beams_buf[: self.num_cands]
return scores_buf, indices_buf, beams_buf, constraint_states
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
self.needs_src_lengths = True
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step >= max_lens, :, self.eos] = 0
return self.beam.step(step, lprobs, scores)
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.beam = BeamSearch(tgt_dict)
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
"DiverseBeamSearch requires --beam to be divisible by the number of groups"
)
# initialize diversity penalty
diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g :: self.num_groups, :]
scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(
lprobs_g,
other=diversity_buf.unsqueeze(1),
alpha=self.diversity_strength,
)
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(
step, lprobs_g, scores_g
)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
diversity_buf.scatter_add_(
1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
)
# interleave results from different groups
scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
return scores_buf, indices_buf, beams_buf
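# Illustrative sketch (added for exposition, not part of the original module):
# DiverseBeamSearch splits the beam into groups and penalizes tokens already
# chosen by earlier groups. The _ToyDict stand-in and all sizes are made up.
def _example_diverse_beam_search_step():
    class _ToyDict:
        def pad(self):
            return 1
        def unk(self):
            return 3
        def eos(self):
            return 2
        def __len__(self):
            return 50
    bsz, beam_size, vocab_size = 2, 4, 50
    search = DiverseBeamSearch(_ToyDict(), num_groups=2, diversity_strength=0.5)
    lprobs = torch.randn(bsz, beam_size, vocab_size).log_softmax(dim=-1)
    scores, indices, beams = search.step(0, lprobs, scores=None)
    # each of the two groups contributes 2 * (beam_size / num_groups) candidates
    assert scores.shape == (bsz, 2 * beam_size)
    return scores, indices, beams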
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
@torch.jit.export
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1),
beam_size,
replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
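# Illustrative sketch (added for exposition, not part of the original module):
# top-k sampling at step 0. The _ToyDict stand-in and all sizes are made up.
def _example_sampling_step():
    class _ToyDict:
        def pad(self):
            return 1
        def unk(self):
            return 3
        def eos(self):
            return 2
        def __len__(self):
            return 100
    bsz, beam_size, vocab_size = 2, 3, 100
    search = Sampling(_ToyDict(), sampling_topk=10)
    lprobs = torch.randn(bsz, beam_size, vocab_size).log_softmax(dim=-1)
    scores, indices, beams = search.step(0, lprobs, scores=None)
    assert scores.shape == indices.shape == (bsz, beam_size)
    assert int(indices.max()) < vocab_size and int(beams.max()) == 0
    return scores, indices, beams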
class DiverseSiblingsSearch(Search):
"""
Beam search with diverse siblings.
See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
https://arxiv.org/abs/1611.08562
1/ Calculate hypotheses for each beam
2/ Intra-sibling ordering
3/ Rewrite scores
4/ Choose top K hypotheses
if diversity_rate == 0 is equivalent to BeamSearch
"""
def __init__(self, tgt_dict, diversity_rate):
super().__init__(tgt_dict)
self.diversity_rate = diversity_rate
self.beam = BeamSearch(tgt_dict)
def step(
self,
step: int,
lprobs,
scores,
prev_output_tokens: Optional[Tensor] = None,
original_batch_idxs: Optional[Tensor] = None,
):
bsz, beam_size, vocab_size = lprobs.size()
k = min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
)
s_list: List[Tensor]
i_list: List[Tensor]
s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate
if step == 0:
return self.beam.step(step, lprobs, scores)
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# 1/ Calculate hypotheses for each beam
for i in range(beam_size):
torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
i_list[i].fmod_(vocab_size)
# 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores
s_list[i].sub_(sibling_score)
# 4/ Choose top K hypotheses
indices = torch.stack(i_list, dim=1).view(bsz, -1)
final_scores = torch.empty(0).to(lprobs)
final_indices = torch.LongTensor().to(device=lprobs.device)
final_beams = torch.LongTensor().to(device=lprobs.device)
(final_scores, final_indices) = torch.topk(
torch.stack(s_list, dim=1).view(bsz, -1),
k,
)
final_beams = final_indices // k
for i in range(bsz):
final_indices[i] = indices[i][final_indices[i]]
return final_scores, final_indices, final_beams
| data2vec_vision-main | deltalm/src/fairseq/search.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
from typing import List, Optional
try:
from fvcore.common.file_io import PathManager as FVCorePathManager
except ImportError:
FVCorePathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
fvcore's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if FVCorePathManager:
return FVCorePathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if FVCorePathManager:
return FVCorePathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if FVCorePathManager:
return FVCorePathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if FVCorePathManager:
return FVCorePathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if FVCorePathManager:
return FVCorePathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if FVCorePathManager:
return FVCorePathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if FVCorePathManager:
return FVCorePathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if FVCorePathManager:
return FVCorePathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if "manifold" not in path:
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if FVCorePathManager:
return FVCorePathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if FVCorePathManager:
return FVCorePathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
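# Illustrative sketch (added for exposition, not part of the original module):
# a local round-trip through PathManager; when fvcore is not installed the
# calls fall back to the Python builtins used above.
def _example_path_manager_roundtrip():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "example.txt")
        with PathManager.open(path, "w", encoding="utf-8") as f:
            f.write("hello\n")
        assert PathManager.exists(path) and PathManager.isfile(path)
        assert PathManager.ls(tmp) == ["example.txt"]
        PathManager.rm(path)
        assert not PathManager.exists(path)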
| data2vec_vision-main | deltalm/src/fairseq/file_io.py |