Dataset columns: python_code (string, 0 to 992k chars), repo_name (string, 8 to 46 chars), file_path (string, 5 to 162 chars).
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import logging
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch

import torch
from fairseq import checkpoint_utils, data
from omegaconf import OmegaConf


def mock_trainer(epoch, num_updates, iterations_in_epoch):
    trainer = MagicMock()
    trainer.load_checkpoint.return_value = {
        "train_iterator": {
            "epoch": epoch,
            "iterations_in_epoch": iterations_in_epoch,
            "shuffle": False,
        },
    }
    trainer.get_num_updates.return_value = num_updates
    return trainer


def mock_dict():
    d = MagicMock()
    d.pad.return_value = 1
    d.eos.return_value = 2
    d.unk.return_value = 3
    return d


def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
    tokens_ds = data.TokenBlockDataset(
        tokens,
        sizes=[tokens.size(-1)],
        block_size=1,
        pad=0,
        eos=1,
        include_targets=False,
    )
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    dataset = data.LanguagePairDataset(
        tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False
    )
    epoch_itr = data.EpochBatchIterator(
        dataset=dataset,
        collate_fn=dataset.collater,
        batch_sampler=[[i] for i in range(epoch_size)],
    )
    return trainer, epoch_itr


def get_mock_cfg(finetune_from_model):
    cfg_mock = OmegaConf.create(
        {
            "checkpoint": {
                "save_dir": None,
                "optimizer_overrides": "{}",
                "reset_dataloader": False,
                "reset_meters": False,
                "reset_optimizer": False,
                "reset_lr_scheduler": False,
                "finetune_from_model": finetune_from_model,
                "model_parallel_size": 1,
                "restore_file": "checkpoint_last.pt",
            },
            "common": {
                "model_parallel_size": 1,
            },
        }
    )
    return cfg_mock


class TestLoadCheckpoint(unittest.TestCase):
    def setUp(self):
        self.cfg_mock = get_mock_cfg(None)
        self.patches = {
            "os.makedirs": MagicMock(),
            "os.path.join": MagicMock(),
            "os.path.isfile": MagicMock(return_value=True),
            "os.path.isabs": MagicMock(return_value=False),
            "fairseq.file_io.PathManager.exists": MagicMock(return_value=False),
        }
        self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
        [p.start() for p in self.applied_patches]
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        patch.stopall()
        logging.disable(logging.NOTSET)

    def test_load_partial_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)

            _, epoch_itr = checkpoint_utils.load_checkpoint(
                self.cfg_mock.checkpoint, trainer
            )

            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)

            self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50)
            self.assertEqual(epoch_itr.iterations_in_epoch, 51)

            for _ in range(150 - 52):
                next(itr)
            self.assertEqual(epoch_itr.iterations_in_epoch, 149)
            self.assertTrue(itr.has_next())
            next(itr)
            self.assertFalse(itr.has_next())

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertTrue(itr.has_next())
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)

    def test_load_full_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)

            _, epoch_itr = checkpoint_utils.load_checkpoint(
                self.cfg_mock.checkpoint, trainer
            )
            itr = epoch_itr.next_epoch_itr(shuffle=False)

            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)

    def test_load_no_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            self.patches["os.path.isfile"].return_value = False

            _, epoch_itr = checkpoint_utils.load_checkpoint(
                self.cfg_mock.checkpoint, trainer
            )
            itr = epoch_itr.next_epoch_itr(shuffle=False)

            self.assertEqual(epoch_itr.epoch, 1)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)

    def test_finetune_from_model_args_conflict(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)

            for arg in [
                "reset_optimizer",
                "reset_lr_scheduler",
                "reset_meters",
                "reset_dataloader",
            ]:
                with self.subTest(arg=arg):
                    cfg_mock = get_mock_cfg("/temp/checkpoint_pretrained.pt")
                    cfg_mock["checkpoint"][arg] = True
                    with self.assertRaises(Exception) as context:
                        _, _ = checkpoint_utils.load_checkpoint(
                            cfg_mock.checkpoint, trainer
                        )
                    self.assertTrue(
                        "--finetune-from-model can not be set together with either --reset-optimizer"
                        " or reset_lr_scheduler or reset_meters or reset_dataloader"
                        in str(context.exception)
                    )

    def test_finetune_from_model(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            from_model_path = "/temp/checkpoint_pretrained.pt"

            def mock_finetune_exist(path):
                if path == from_model_path:
                    return True
                else:
                    return False

            self.patches[
                "fairseq.file_io.PathManager.exists"
            ].side_effect = mock_finetune_exist
            cfg_mock = get_mock_cfg(from_model_path)
            cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
            _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
            (
                checkpoint_path,
                reset_optimizer,
                reset_lr_scheduler,
                optimizer_overrides,
            ) = trainer.load_checkpoint.call_args[0]
            reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
            self.assertTrue(reset_optimizer)
            self.assertTrue(reset_lr_scheduler)
            self.assertTrue(reset_meters)

    def test_finetune_from_model_resume(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            from_model_path = "/temp/checkpoint_pretrained.pt"

            # launch second time
            # both restore_file=checkpoint_last.pt and finetune_from_model are set
            def mock_finetune_exist(path):
                if path == from_model_path or path.endswith("checkpoint_last.pt"):
                    return True
                else:
                    return False

            self.patches[
                "fairseq.file_io.PathManager.exists"
            ].side_effect = mock_finetune_exist
            cfg_mock = get_mock_cfg(from_model_path)
            cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
            _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
            (
                checkpoint_path,
                reset_optimizer,
                reset_lr_scheduler,
                optimizer_overrides,
            ) = trainer.load_checkpoint.call_args[0]
            reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
            self.assertFalse(reset_optimizer)
            self.assertFalse(reset_lr_scheduler)
            self.assertFalse(reset_meters)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_train.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import unittest

import tests.utils as test_utils
import torch
from fairseq.sequence_scorer import SequenceScorer


class TestSequenceScorer(unittest.TestCase):
    def test_sequence_scorer(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        eos = d.eos()
        w1 = 4
        w2 = 5

        # construct dataloader
        data = [
            {
                "source": torch.LongTensor([w1, w2, eos]),
                "target": torch.LongTensor([w1, w2, w1, eos]),
            },
            {
                "source": torch.LongTensor([w2, eos]),
                "target": torch.LongTensor([w2, w1, eos]),
            },
            {
                "source": torch.LongTensor([w2, eos]),
                "target": torch.LongTensor([w2, eos]),
            },
        ]
        data_itr = test_utils.dummy_dataloader(data)

        # specify expected output probabilities
        args = argparse.Namespace()
        unk = 0.0
        args.beam_probs = [
            # step 0:
            torch.FloatTensor(
                [
                    # eos      w1   w2
                    [0.0, unk, 0.6, 0.4],  # sentence 1
                    [0.0, unk, 0.4, 0.6],  # sentence 2
                    [0.0, unk, 0.7, 0.3],  # sentence 3
                ]
            ),
            # step 1:
            torch.FloatTensor(
                [
                    # eos      w1   w2
                    [0.0, unk, 0.2, 0.7],  # sentence 1
                    [0.0, unk, 0.8, 0.2],  # sentence 2
                    [0.7, unk, 0.1, 0.2],  # sentence 3
                ]
            ),
            # step 2:
            torch.FloatTensor(
                [
                    # eos       w1    w2
                    [0.10, unk, 0.50, 0.4],  # sentence 1
                    [0.15, unk, 0.15, 0.7],  # sentence 2
                    [0.00, unk, 0.00, 0.0],  # sentence 3
                ]
            ),
            # step 3:
            torch.FloatTensor(
                [
                    # eos      w1    w2
                    [0.9, unk, 0.05, 0.05],  # sentence 1
                    [0.0, unk, 0.00, 0.0],  # sentence 2
                    [0.0, unk, 0.00, 0.0],  # sentence 3
                ]
            ),
        ]

        expected_scores = [
            [0.6, 0.7, 0.5, 0.9],  # sentence 1
            [0.6, 0.8, 0.15],  # sentence 2
            [0.3, 0.7],  # sentence 3
        ]

        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        model = task.build_model(args)
        scorer = SequenceScorer(task.target_dictionary)
        for sample in data_itr:
            hypos = task.inference_step(scorer, [model], sample)
            for id, hypos_id in zip(sample["id"].tolist(), hypos):
                self.assertHypoTokens(hypos_id[0], data[id]["target"])
                self.assertHypoScore(hypos_id[0], expected_scores[id])

    def assertHypoTokens(self, hypo, tokens):
        self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        self.assertLess(abs(score - hypo["score"]), 1e-6)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import unittest

from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.transformer import TransformerModel
from tests.test_sequence_generator import get_dummy_task_and_parser


class TestInferenceDropout(unittest.TestCase):
    def setUp(self):
        self.task, self.parser = get_dummy_task_and_parser()
        TransformerModel.add_args(self.parser)
        self.args = self.parser.parse_args([])
        self.args.encoder_layers = 2
        self.args.decoder_layers = 1
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_sets_inference_dropout_to_true(self):
        self.args.retain_dropout = True
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        cfg = convert_namespace_to_omegaconf(self.args)
        self.transformer_model.prepare_for_inference_(cfg)
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.apply_during_inference

    def test_inference_dropout_false_by_default(self):
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        cfg = convert_namespace_to_omegaconf(self.args)
        self.transformer_model.prepare_for_inference_(cfg)
        assert not self.transformer_model.encoder.dropout_module.apply_during_inference
        assert not self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.encoder.layers:
            assert not layer.dropout_module.apply_during_inference
        for layer in self.transformer_model.decoder.layers:
            assert not layer.dropout_module.apply_during_inference

    def test_applies_training_mode(self):
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        assert self.transformer_model.encoder.dropout_module.training
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.training

        self.transformer_model.eval()
        assert not self.transformer_model.decoder.dropout_module.training
        for layer in self.transformer_model.encoder.layers:
            assert not layer.dropout_module.training

    def test_retain_modules(self):
        self.args.retain_dropout = True
        self.args.retain_dropout_modules = [
            "TransformerEncoder",
            "TransformerEncoderLayer",
        ]
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        cfg = convert_namespace_to_omegaconf(self.args)
        self.transformer_model.prepare_for_inference_(cfg)
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert not self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.decoder.layers:
            assert not layer.dropout_module.apply_during_inference
EXA-1-master
exa/libraries/fairseq/tests/test_inference_dropout.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import typing as tp import unittest from tempfile import TemporaryDirectory from fairseq.binarizer import BinarizeSummary, FileBinarizer, VocabularyDatasetBinarizer from fairseq.data import Dictionary, indexed_dataset from tests.utils import make_data, sizes def build_vocab(data: tp.List[tp.List[str]]) -> Dictionary: d = Dictionary() for s in data: for token in s: d.add_symbol(token) d.finalize() return d class TestBinarizer(unittest.TestCase): def compare_ds_data(self, summary, data, prefix, impl, vocab): self.assertEqual(summary.num_seq, len(data)) self.assertEqual(summary.num_tok, sum([len(s) for s in data])) dataset = indexed_dataset.make_dataset(prefix, impl) self.assertEqual(len(dataset), len(data)) decoded = [vocab.string(dataset[i]).split() for i in range(0, len(dataset))] self.assertEqual(decoded, data) data_sizes = [i.item() for i in dataset.sizes] self.assertEqual(data_sizes, sizes(data)) def test_can_binarize_line(self): data = make_data(length=1) vocab = build_vocab(data) binarizer = VocabularyDatasetBinarizer( vocab, ) sentence = data[0] summary = BinarizeSummary() tensor = binarizer.binarize_line( " ".join(sentence), summary, ) self.assertEqual(len(tensor), len(sentence) + 1) self.assertEqual(summary.num_tok, len(sentence) + 1) self.assertEqual(summary.num_seq, 1) def test_can_binarize_file_chunk(self): # test without multiprocess logic with TemporaryDirectory() as dirname: raw_file = os.path.join(dirname, "raw1") prefix = os.path.join(dirname, "test1") impl = "mmap" data = make_data(out_file=raw_file) vocab = build_vocab(data) binarizer = VocabularyDatasetBinarizer( vocab, append_eos=False, ) summary = FileBinarizer._binarize_chunk_and_finalize( binarizer, raw_file, offset_start=0, offset_end=-1, output_prefix=prefix, dataset_impl=impl, vocab_size=len(vocab), ) self.compare_ds_data(summary, data, prefix, impl, vocab) def test_can_multiprocess(self): with TemporaryDirectory() as dirname: raw_file = os.path.join(dirname, "raw1") prefix = os.path.join(dirname, "test1") impl = "mmap" data = make_data(out_file=raw_file) vocab = build_vocab(data) binarizer = VocabularyDatasetBinarizer( vocab, append_eos=False, ) # with one worker summary = FileBinarizer.multiprocess_dataset( raw_file, impl, binarizer, output_prefix=prefix, vocab_size=len(vocab), num_workers=1, ) self.compare_ds_data(summary, data, prefix, impl, vocab) # with multiple worker prefix_multi = os.path.join(dirname, "test2") summary = FileBinarizer.multiprocess_dataset( raw_file, impl, binarizer, output_prefix=prefix_multi, vocab_size=len(vocab), num_workers=3, ) self.compare_ds_data(summary, data, prefix_multi, impl, vocab)
EXA-1-master
exa/libraries/fairseq/tests/test_binarizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import random import unittest import pytest import torch from fairseq.modules.multihead_attention import MultiheadAttention, _mask_for_xformers BATCH = [20, 41, 97] SEQ = [64] EMB = [48] HEADS = [4] DROP = 0.1 DEVICE = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"] ATTN_MASK_DTYPE = [None, torch.uint8, torch.bool, torch.float] KEY_PADDING_MASK_DTYPE = [None, torch.uint8, torch.bool] # FIXME: some tests fail when decimal=2, fix this and set decimal to 2 def assert_almost_equal(x, y, decimal=1, err_msg=""): import numpy.testing as npt if isinstance(x, torch.Tensor): x = x.cpu().detach().numpy() if isinstance(y, torch.Tensor): y = y.cpu().detach().numpy() npt.assert_array_almost_equal(x, y, err_msg=err_msg, decimal=decimal) def _reset_seeds(): torch.manual_seed(0) torch.random.manual_seed(0) random.seed(0) torch.cuda.manual_seed_all(0) def _get_mask(to_dtype: torch.dtype, dim0: int, dim1: int): if to_dtype == torch.float: mask = torch.randint(0, 2, (dim0, dim1)).to(dtype=torch.bool) return mask.to(dtype=to_dtype).masked_fill(mask, -float("inf")) return torch.randint(0, 2, (dim0, dim1)).to(dtype=to_dtype) def test_mask_for_xformers(): # Additive Mask m_float_add = torch.tensor([float("-inf"), 0]).to(torch.float) m_float_add_flipped = torch.tensor([0, float("-inf")]).to(torch.float) m_float16_add = torch.tensor([float("-inf"), 0]).to(torch.float16) m_float16_add_flipped = torch.tensor([0, float("-inf")]).to(torch.float16) m_uint = torch.tensor([1, 0]).to(torch.uint8) m_uint_flipped = torch.tensor([0, 1]).to(torch.uint8) m_bool = torch.tensor([False, True]) assert torch.equal(_mask_for_xformers(m_float_add), m_float_add) assert torch.equal(_mask_for_xformers(m_float16_add), m_float16_add) assert torch.equal(_mask_for_xformers(m_uint), m_uint_flipped) assert torch.equal(_mask_for_xformers(m_bool), ~m_bool) assert torch.equal( _mask_for_xformers(m_float_add, to_dtype=torch.float16), m_float16_add ) assert torch.equal( _mask_for_xformers(m_float_add, to_dtype=torch.float), m_float_add ) assert torch.equal(_mask_for_xformers(m_float_add, to_dtype=torch.bool), m_bool) assert torch.equal( _mask_for_xformers(m_float_add, to_dtype=torch.uint8), m_uint_flipped ) assert torch.equal( _mask_for_xformers(m_float16_add, to_dtype=torch.float16), m_float16_add ) assert torch.equal( _mask_for_xformers(m_float16_add, to_dtype=torch.float), m_float_add ) assert torch.equal(_mask_for_xformers(m_float16_add, to_dtype=torch.bool), m_bool) assert torch.equal( _mask_for_xformers(m_float16_add, to_dtype=torch.uint8), m_uint_flipped ) assert torch.equal( _mask_for_xformers(m_bool, to_dtype=torch.float16), m_float16_add_flipped ) assert torch.equal( _mask_for_xformers(m_bool, to_dtype=torch.float), m_float_add_flipped ) assert torch.equal(_mask_for_xformers(m_bool, to_dtype=torch.bool), ~m_bool) assert torch.equal(_mask_for_xformers(m_bool, to_dtype=torch.uint8), m_uint) assert torch.equal( _mask_for_xformers(m_uint, to_dtype=torch.float16), m_float16_add ) assert torch.equal(_mask_for_xformers(m_uint, to_dtype=torch.float), m_float_add) assert torch.equal(_mask_for_xformers(m_uint, to_dtype=torch.bool), m_bool) assert torch.equal(_mask_for_xformers(m_uint, to_dtype=torch.uint8), m_uint_flipped) @pytest.mark.skipif(not torch.cuda.is_available(), reason="blocksparse requires gpu") @pytest.mark.skip(reason="not part of latest xformers") 
@pytest.mark.parametrize("device", ["cuda"]) @pytest.mark.parametrize("add_zero_attn", [False]) @pytest.mark.parametrize("batch_size", [20]) @pytest.mark.parametrize("embedding", [64]) @pytest.mark.parametrize("seq_len", [64]) @pytest.mark.parametrize("num_heads", [4]) def test_xformers_blocksparse_parity( device, add_zero_attn, batch_size, embedding, seq_len, num_heads, ): xformers_att_config = '{"name": "scaled_dot_product"}' xformers_blocksparse_blocksize = 16 xformers_blocksparse_layout = torch.ones( seq_len // xformers_blocksparse_blocksize, seq_len // xformers_blocksparse_blocksize, dtype=torch.int32, ) q = torch.rand(seq_len, batch_size, embedding).to(device).half() q.requires_grad = True k = torch.rand(seq_len, batch_size, embedding).to(device).half() k.requires_grad = True v = torch.rand(seq_len, batch_size, embedding).to(device).half() v.requires_grad = True q_ = q.detach().clone().half() q_.requires_grad = True k_ = k.detach().clone().half() k_.requires_grad = True v_ = v.detach().clone().half() v_.requires_grad = True _reset_seeds() xf_blocksparse_mha = ( MultiheadAttention( embedding, num_heads, dropout=0.0, add_zero_attn=add_zero_attn, xformers_att_config=xformers_att_config, xformers_blocksparse_layout=xformers_blocksparse_layout, xformers_blocksparse_blocksize=xformers_blocksparse_blocksize, ) .to(device) .half() ) xf_blocksparse_output, _ = xf_blocksparse_mha( q, k, v, ) _reset_seeds() xformers_mha = ( MultiheadAttention( embedding, num_heads, dropout=0.0, add_zero_attn=add_zero_attn, xformers_att_config=xformers_att_config, xformers_blocksparse_layout=None, ) .to(device) .half() ) xformers_output, _ = xformers_mha( q_, k_, v_, ) # # account for when nan != nan rand = random.uniform(0, 1) xformers_output = xformers_output.masked_fill(xformers_output.isnan(), rand) xf_blocksparse_output = xf_blocksparse_output.masked_fill( xf_blocksparse_output.isnan(), rand ) assert_almost_equal(xformers_output, xf_blocksparse_output) loss_blocksparse = torch.norm(xformers_output) loss_original = torch.norm(xf_blocksparse_output) loss_blocksparse.backward() loss_original.backward() q.masked_fill(q.isnan(), rand) q_.masked_fill(q_.isnan(), rand) k.masked_fill(k.isnan(), rand) k_.masked_fill(k_.isnan(), rand) v.masked_fill(v.isnan(), rand) v_.masked_fill(v_.isnan(), rand) assert_almost_equal(q.grad, q_.grad) assert_almost_equal(k.grad, k_.grad) assert_almost_equal(v.grad, v_.grad) @pytest.mark.parametrize("device", DEVICE) @pytest.mark.parametrize("attn_dtype", ATTN_MASK_DTYPE) @pytest.mark.parametrize("key_padding_dtype", KEY_PADDING_MASK_DTYPE) @pytest.mark.parametrize("add_bias_kv", [True, False]) @pytest.mark.parametrize("add_zero_attn", [True, False]) # TODO: test with static_kv True @pytest.mark.parametrize("static_kv", [False]) @pytest.mark.parametrize("batch_size", BATCH) @pytest.mark.parametrize("embedding", EMB) @pytest.mark.parametrize("seq_len", SEQ) @pytest.mark.parametrize("num_heads", HEADS) def test_xformers_single_forward_parity( device, attn_dtype, key_padding_dtype, add_bias_kv, add_zero_attn, static_kv, batch_size, embedding, seq_len, num_heads, ): xformers_att_config = '{"name": "scaled_dot_product"}' attn_mask = ( None if attn_dtype is None else _get_mask(to_dtype=attn_dtype, dim0=seq_len, dim1=seq_len).to(device) ) key_padding_mask = ( None if key_padding_dtype is None else _get_mask(to_dtype=key_padding_dtype, dim0=batch_size, dim1=seq_len).to( device ) ) q = torch.rand(seq_len, batch_size, embedding).to(device) q.requires_grad = True k = torch.rand(seq_len, 
batch_size, embedding).to(device) k.requires_grad = True v = torch.rand(seq_len, batch_size, embedding).to(device) v.requires_grad = True q_ = q.detach().clone() q_.requires_grad = True k_ = k.detach().clone() k_.requires_grad = True v_ = v.detach().clone() v_.requires_grad = True # TODO: dropouts in the two implementations lead to different entries dropped. _reset_seeds() xformers_mha = MultiheadAttention( embedding, num_heads, dropout=0.0, xformers_att_config=xformers_att_config, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ).to(device) xformers_output, _ = xformers_mha( q, k, v, key_padding_mask=key_padding_mask, attn_mask=attn_mask, static_kv=static_kv, ) _reset_seeds() original_mha = MultiheadAttention( embedding, num_heads, dropout=0.0, xformers_att_config=None, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ).to(device) original_output, _ = original_mha( q_, k_, v_, key_padding_mask=key_padding_mask, attn_mask=attn_mask, static_kv=static_kv, ) # account for when nan != nan if xformers_output.isnan().any() or original_output.isnan().any(): rand = random.uniform(0, 1) xformers_output = xformers_output.masked_fill(xformers_output.isnan(), rand) original_output = original_output.masked_fill(original_output.isnan(), rand) # torch.equal works for cpu, on cuda allclose is needed. assert torch.allclose( xformers_output, original_output, atol=1e-06 ), f"max diff is {torch.max(torch.abs(xformers_output - original_output))}" loss_xformers = torch.norm(xformers_output) loss_original = torch.norm(original_output) loss_xformers.backward() loss_original.backward() # torch.equal works for cpu, on cuda allclose is needed. assert torch.allclose( q.grad, q_.grad ), f"max diff is {torch.max(torch.abs(q.grad - q_.grad))}" assert torch.allclose( k.grad, k_.grad ), f"max diff is {torch.max(torch.abs(k.grad - k_.grad))}" assert torch.allclose( v.grad, v_.grad ), f"max diff is {torch.max(torch.abs(v.grad - v_.grad))}" def test_mask_padding_parity(): def old_padding_code(key_padding_mask, attn_mask): if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask), ], dim=1, ) return key_padding_mask, attn_mask # values don't matter for this test. mha = MultiheadAttention( embed_dim=8, num_heads=2, dropout=0.0, add_bias_kv=True, add_zero_attn=True, ) key_padding_mask = torch.rand((8, 64)) attn_mask = torch.rand((64, 64)) kp_mask_orig, a_mask_orig = old_padding_code(key_padding_mask, attn_mask) kp_mask_new, a_mask_new = mha._pad_masks(key_padding_mask, attn_mask) assert kp_mask_orig.size() == kp_mask_new.size() assert a_mask_orig.size() == a_mask_new.size() assert torch.equal(kp_mask_orig, kp_mask_new) assert torch.equal(a_mask_orig, a_mask_new) def test_add_bias_parity(): # values don't matter for this test. 
mha = MultiheadAttention( embed_dim=8, num_heads=2, dropout=0.0, add_bias_kv=True, add_zero_attn=True, ) def old_bias_code(k, v, key_padding_mask, attn_mask, bsz): k = torch.cat([k, mha.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, mha.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) return k, v, key_padding_mask, attn_mask seq_len = 64 bsz = 8 embedding = 8 key_padding_mask = torch.rand((bsz, seq_len)) attn_mask = torch.rand((seq_len, seq_len)) k = torch.rand((seq_len, bsz, embedding)) v = torch.rand((seq_len, bsz, embedding)) k_orig, v_orig, kp_mask_orig, a_mask_orig = old_bias_code( k, v, key_padding_mask, attn_mask, bsz ) k_new, v_new, kp_mask_new, a_mask_new = mha._add_bias( k, v, key_padding_mask, attn_mask, bsz ) assert torch.equal(k_orig, k_new) assert torch.equal(v_orig, v_new) assert torch.equal(kp_mask_orig, kp_mask_new) assert torch.equal(a_mask_orig, a_mask_new) class TestMultiheadAttention(unittest.TestCase): def test_append_prev_key_padding_mask(self): bsz = 1 src_len = 4 cases = [ # no padding mask (None, None, None), # current padding mask only ( torch.tensor([[1]]).bool(), None, torch.tensor([[0, 0, 0, 1]]).bool(), ), # previous padding mask only ( None, torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 0]]).bool(), ), # both padding masks ( torch.tensor([[1]]).bool(), torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 1]]).bool(), ), # prev_key_padding_mask already full ( torch.tensor([[0, 1, 0, 1]]).bool(), None, torch.tensor([[0, 1, 0, 1]]).bool(), ), # key_padding_mask already full ( None, torch.tensor([[0, 1, 0, 1]]).bool(), torch.tensor([[0, 1, 0, 1]]).bool(), ), ] for c in cases: key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( c[0], c[1], batch_size=bsz, src_len=src_len, static_kv=False, ) if key_padding_mask is not None: self.assertTrue( torch.all(torch.eq(key_padding_mask, c[2])), f"Unexpected resultant key padding mask: {key_padding_mask}" f" given current: {c[0]} and previous: {c[1]}", ) self.assertEqual(key_padding_mask.size(0), bsz) self.assertEqual(key_padding_mask.size(1), src_len) else: self.assertIsNone(c[2]) def test_pruning_heads(self): embed_dim = 768 num_heads = 12 num_heads_to_keep = 8 dummy_input = torch.randn(32, 2, embed_dim) mha = MultiheadAttention(embed_dim=embed_dim, num_heads=num_heads) reserve_head_index = mha._get_reserve_head_index( num_heads_to_keep=num_heads_to_keep ) mha._adaptive_prune_heads(reserve_head_index=reserve_head_index) mha._set_skip_embed_dim_check() mha(query=dummy_input, key=dummy_input, value=dummy_input) self.assertEqual(mha.head_dim, embed_dim / num_heads) self.assertEqual(mha.num_heads, num_heads_to_keep) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import numpy as np from fairseq.data.data_utils_fast import batch_by_size_fn, batch_by_size_vec class TestBatchBySize(unittest.TestCase): @classmethod def batch_by_size_baseline( cls, indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ): """Simple, reliable and slow implementation of batch by size""" batches = [] start = 0 while start < len(indices): for end in range(start + 1, len(indices) + 1): max_val = max(num_tokens_vec[pos] for pos in range(start, end)) sent_count = end - start num_tokens = max_val * sent_count overflow = num_tokens > max_tokens > 0 or sent_count > max_sentences > 0 terminate = overflow or end == len(indices) if overflow: sent_count -= 1 if terminate: if sent_count > bsz_mult: sent_count = sent_count - sent_count % bsz_mult batches.append(indices[start : start + sent_count]) start = start + sent_count break return batches @classmethod def _get_error_message( cls, max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results ): return f"""Reference batch_by_size implementation should produce same output as the baseline method. Params: max_sentences={max_sentences}, max_tokens={max_tokens}, bsz_mult={bsz_mult}, num_tokens_vec={num_tokens_vec}, expected_batches={validation}, returned_batches={results}""" def _compare_results( self, indices_len, batch_by_size_impl, max_sentences, max_tokens, bsz_mult, num_tokens_vec, ): indices = np.array(list(range(indices_len))) validation = self.batch_by_size_baseline( indices, num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, bsz_mult=bsz_mult, ) results = batch_by_size_impl( indices, num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, bsz_mult=bsz_mult, ) error_msg = self._get_error_message( max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results ) self.assertEqual(len(validation), len(results), error_msg) for first, second in zip(validation, results): self.assertTrue(np.array_equal(first, second), error_msg) def _run_compare_with_baseline_sweep(self, batch_by_size_impl): """Compare reference batch_by_size implementation with batch_by_size_baseline across a dense grid of hyperparam values""" MAX_MAX_TOKENS = 10 NUM_TOKENS_VECS_COUNT = 5 for indices_len in [10, 11]: # try odd and even len of indices for max_sentences in range(0, indices_len + 2): for max_tokens in range(0, MAX_MAX_TOKENS): for bsz_mult in range(1, max(MAX_MAX_TOKENS, indices_len) + 2): for _ in range(NUM_TOKENS_VECS_COUNT): num_tokens_vec = np.random.randint( 0, max_tokens + 1, size=indices_len ) self._compare_results( indices_len, batch_by_size_impl, max_sentences, max_tokens, bsz_mult, num_tokens_vec, ) class TestBatchBySizeVec(TestBatchBySize): def test_compare_with_baseline(self): self._run_compare_with_baseline_sweep(batch_by_size_vec) class TestBatchBySizeFn(TestBatchBySize): def test_compare_with_baseline(self): def batch_by_size_fn_wrapper( indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ): def num_tokens_fn(idx): return num_tokens_vec[idx] return batch_by_size_fn( indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult ) self._run_compare_with_baseline_sweep(batch_by_size_fn_wrapper) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_data_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import typing as tp import unittest from collections import Counter from tempfile import NamedTemporaryFile, TemporaryDirectory from fairseq.data import Dictionary, indexed_dataset from fairseq.data.huffman import ( HuffmanCodeBuilder, HuffmanCoder, HuffmanMMapIndexedDataset, HuffmanMMapIndexedDatasetBuilder, ) from tests.utils import POPULATION, make_data, sizes def make_counts(data: tp.List[tp.List[str]]) -> Counter: return Counter([symbol for sentence in data for symbol in sentence]) def make_code_builder(data: tp.List[tp.List[str]]) -> HuffmanCodeBuilder: builder = HuffmanCodeBuilder() for sentence in data: builder.add_symbols(*sentence) return builder class TestCodeBuilder(unittest.TestCase): def test_code_builder_can_count(self): data = make_data() counts = make_counts(data) builder = make_code_builder(data) self.assertEqual(builder.symbols, counts) def test_code_builder_can_add(self): data = make_data() counts = make_counts(data) builder = make_code_builder(data) new_builder = builder + builder self.assertEqual(new_builder.symbols, counts + counts) def test_code_builder_can_io(self): data = make_data() builder = make_code_builder(data) with NamedTemporaryFile() as tmp_fp: builder.to_file(tmp_fp.name) other_builder = HuffmanCodeBuilder.from_file(tmp_fp.name) self.assertEqual(builder.symbols, other_builder.symbols) class TestCoder(unittest.TestCase): def test_coder_can_io(self): data = make_data() builder = make_code_builder(data) coder = builder.build_code() with NamedTemporaryFile() as tmp_fp: coder.to_file(tmp_fp.name) other_coder = HuffmanCoder.from_file(tmp_fp.name) self.assertEqual(coder, other_coder) def test_coder_can_encode_decode(self): data = make_data() builder = make_code_builder(data) coder = builder.build_code() encoded = [coder.encode(sentence) for sentence in data] decoded = [[n.symbol for n in coder.decode(enc)] for enc in encoded] self.assertEqual(decoded, data) unseen_data = make_data() unseen_encoded = [coder.encode(sentence) for sentence in unseen_data] unseen_decoded = [ [n.symbol for n in coder.decode(enc)] for enc in unseen_encoded ] self.assertEqual(unseen_decoded, unseen_data) def build_dataset(prefix, data, coder): with HuffmanMMapIndexedDatasetBuilder(prefix, coder) as builder: for sentence in data: builder.add_item(sentence) class TestHuffmanDataset(unittest.TestCase): def test_huffman_can_encode_decode(self): data = make_data() builder = make_code_builder(data) coder = builder.build_code() with TemporaryDirectory() as dirname: prefix = os.path.join(dirname, "test1") build_dataset(prefix, data, coder) dataset = HuffmanMMapIndexedDataset(prefix) self.assertEqual(len(dataset), len(data)) decoded = [list(dataset.get_symbols(i)) for i in range(0, len(dataset))] self.assertEqual(decoded, data) data_sizes = [i.item() for i in dataset.sizes] self.assertEqual(data_sizes, sizes(data)) def test_huffman_compresses(self): data = make_data() builder = make_code_builder(data) coder = builder.build_code() with TemporaryDirectory() as dirname: prefix = os.path.join(dirname, "huffman") build_dataset(prefix, data, coder) prefix_mmap = os.path.join(dirname, "mmap") mmap_builder = indexed_dataset.make_builder( indexed_dataset.data_file_path(prefix_mmap), "mmap", vocab_size=len(POPULATION), ) dictionary = Dictionary() for c in POPULATION: dictionary.add_symbol(c) dictionary.finalize() for 
sentence in data: mmap_builder.add_item(dictionary.encode_line(" ".join(sentence))) mmap_builder.finalize(indexed_dataset.index_file_path(prefix_mmap)) huff_size = os.stat(indexed_dataset.data_file_path(prefix)).st_size mmap_size = os.stat(indexed_dataset.data_file_path(prefix_mmap)).st_size self.assertLess(huff_size, mmap_size) def test_huffman_can_append(self): data1 = make_data() builder = make_code_builder(data1) coder = builder.build_code() with TemporaryDirectory() as dirname: prefix1 = os.path.join(dirname, "test1") build_dataset(prefix1, data1, coder) data2 = make_data() prefix2 = os.path.join(dirname, "test2") build_dataset(prefix2, data2, coder) prefix3 = os.path.join(dirname, "test3") with HuffmanMMapIndexedDatasetBuilder(prefix3, coder) as builder: builder.append(prefix1) builder.append(prefix2) dataset = HuffmanMMapIndexedDataset(prefix3) self.assertEqual(len(dataset), len(data1) + len(data2)) decoded1 = [list(dataset.get_symbols(i)) for i in range(0, len(data1))] self.assertEqual(decoded1, data1) decoded2 = [ list(dataset.get_symbols(i)) for i in range(len(data1), len(dataset)) ] self.assertEqual(decoded2, data2) data_sizes = [i.item() for i in dataset.sizes] self.assertEqual(data_sizes[: len(data1)], sizes(data1)) self.assertEqual(data_sizes[len(data1) : len(dataset)], sizes(data2)) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_huffman.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import os import tempfile import unittest from io import StringIO from unittest.mock import patch from omegaconf import OmegaConf from fairseq import checkpoint_utils from tests.utils import ( create_dummy_data, preprocess_translation_data, train_translation_model, ) import torch class TestCheckpointUtils(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @contextlib.contextmanager def _train_transformer(self, seed, extra_args=None): if extra_args is None: extra_args = [] with tempfile.TemporaryDirectory(f"_train_transformer_seed{seed}") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "3", "--decoder-layers", "3", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--seed", str(seed), ] + extra_args, ) yield os.path.join(data_dir, "checkpoint_last.pt") def test_load_model_ensemble_and_task(self): # with contextlib.redirect_stdout(StringIO()): with self._train_transformer(seed=123) as model1: with self._train_transformer(seed=456) as model2: ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task( filenames=[model1, model2] ) self.assertEqual(len(ensemble), 2) # after Transformer has been migrated to Hydra, this will probably # become cfg.common.seed self.assertEqual(ensemble[0].args.seed, 123) self.assertEqual(ensemble[1].args.seed, 456) # the task from the first model should be returned self.assertTrue("seed123" in task.cfg.data) # last cfg is saved self.assertEqual(cfg.common.seed, 456) def test_prune_state_dict(self): with contextlib.redirect_stdout(StringIO()): extra_args = ["--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01"] with self._train_transformer(seed=1, extra_args=extra_args) as model: ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task( filenames=[model], arg_overrides={ "encoder_layers_to_keep": "0,2", "decoder_layers_to_keep": "1", }, ) self.assertEqual(len(ensemble), 1) self.assertEqual(len(ensemble[0].encoder.layers), 2) self.assertEqual(len(ensemble[0].decoder.layers), 1) def test_torch_persistent_save_async(self): state_dict = {} filename = "async_checkpoint.pt" with patch(f"{checkpoint_utils.__name__}.PathManager.opena") as mock_opena: with patch( f"{checkpoint_utils.__name__}._torch_persistent_save" ) as mock_save: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=True ) mock_opena.assert_called_with(filename, "wb") mock_save.assert_called() def test_load_ema_from_checkpoint(self): dummy_state = {"a": torch.tensor([1]), "b": torch.tensor([0.1])} with patch(f"{checkpoint_utils.__name__}.PathManager.open") as mock_open, patch( f"{checkpoint_utils.__name__}.torch.load" ) as mock_load: mock_load.return_value = {"extra_state": {"ema": dummy_state}} filename = "ema_checkpoint.pt" state = checkpoint_utils.load_ema_from_checkpoint(filename) mock_open.assert_called_with(filename, "rb") mock_load.assert_called() self.assertIn("a", state["model"]) self.assertIn("b", state["model"]) self.assertTrue(torch.allclose(dummy_state["a"], state["model"]["a"])) self.assertTrue(torch.allclose(dummy_state["b"], state["model"]["b"])) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_checkpoint_utils.py
import torch
import numpy as np
import unittest
from fairseq.modules.rotary_positional_embedding import apply_rotary_pos_emb
from fairseq.modules import RotaryPositionalEmbedding


class TestRotaryPositionalEmbedding(unittest.TestCase):
    def setUp(self) -> None:
        self.T = 3
        self.B = 1
        self.C = 2
        torch.manual_seed(0)
        self.sample = torch.randn(self.T, self.B, self.C)  # TBC
        self.rope_pos_emd = RotaryPositionalEmbedding(dim=self.C)

    def test_forward(self):
        expected_cos = torch.tensor(
            [[[[1.0000, 1.0000]]], [[[0.5403, 0.5403]]], [[[-0.4161, -0.4161]]]]
        )
        expected_sin = torch.tensor(
            [[[[0.0000, 0.0000]]], [[[0.8415, 0.8415]]], [[[0.9093, 0.9093]]]]
        )
        cos, sin = self.rope_pos_emd(self.sample, self.T)
        self.assertTrue(
            np.allclose(
                expected_cos.cpu().detach().numpy(),
                cos.cpu().detach().numpy(),
                atol=1e-4,
            )
        )
        self.assertTrue(
            np.allclose(
                expected_sin.cpu().detach().numpy(),
                sin.cpu().detach().numpy(),
                atol=1e-4,
            )
        )

    def test_apply_rotary_pos_emb(self):
        cos, sin = self.rope_pos_emd(self.sample, self.T)
        query = self.sample.view(self.T, self.B, 1, self.C)
        expected_query = torch.tensor(
            [[[[1.5410, -0.2934]]], [[[-1.6555, -1.5263]]], [[[1.7231, -0.4041]]]]
        )
        new_query, new_key = apply_rotary_pos_emb(query, query, cos, sin)
        self.assertTrue(
            np.allclose(
                expected_query.cpu().detach().numpy(),
                new_query.cpu().detach().numpy(),
                atol=1e-4,
            )
        )
        self.assertTrue(
            np.allclose(
                expected_query.cpu().detach().numpy(),
                new_key.cpu().detach().numpy(),
                atol=1e-4,
            )
        )


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_rotary_positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from unittest import mock


class TestIOPath(unittest.TestCase):
    def test_no_iopath(self):
        from .test_reproducibility import TestReproducibility

        with mock.patch.dict("sys.modules", {"iopath": None}):
            # reuse reproducibility tests, which are e2e tests that should cover
            # most checkpoint related functionality
            TestReproducibility._test_reproducibility(self, "test_reproducibility")

    def test_no_supports_rename(self):
        from .test_reproducibility import TestReproducibility

        with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn:
            mock_fn.return_value = False
            TestReproducibility._test_reproducibility(self, "test_reproducibility")


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_iopath.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import collections
import unittest

import numpy as np
from fairseq.data import ListDataset, ResamplingDataset


class TestResamplingDataset(unittest.TestCase):
    def setUp(self):
        self.strings = ["ab", "c", "def", "ghij"]
        self.weights = [4.0, 2.0, 7.0, 1.5]
        self.size_ratio = 2
        self.dataset = ListDataset(
            self.strings, np.array([len(s) for s in self.strings])
        )

    def _test_common(self, resampling_dataset, iters):
        assert len(self.dataset) == len(self.strings) == len(self.weights)
        assert len(resampling_dataset) == self.size_ratio * len(self.strings)

        results = {"ordered_by_size": True, "max_distribution_diff": 0.0}

        totalfreqs = 0
        freqs = collections.defaultdict(int)

        for epoch_num in range(iters):
            resampling_dataset.set_epoch(epoch_num)

            indices = resampling_dataset.ordered_indices()
            assert len(indices) == len(resampling_dataset)

            prev_size = -1

            for i in indices:
                cur_size = resampling_dataset.size(i)

                # Make sure indices map to same sequences within an epoch
                assert resampling_dataset[i] == resampling_dataset[i]

                # Make sure length of sequence is correct
                assert cur_size == len(resampling_dataset[i])

                freqs[resampling_dataset[i]] += 1
                totalfreqs += 1

                if prev_size > cur_size:
                    results["ordered_by_size"] = False

                prev_size = cur_size

        assert set(freqs.keys()) == set(self.strings)
        for s, weight in zip(self.strings, self.weights):
            freq = freqs[s] / totalfreqs
            expected_freq = weight / sum(self.weights)
            results["max_distribution_diff"] = max(
                results["max_distribution_diff"], abs(expected_freq - freq)
            )

        return results

    def test_resampling_dataset_batch_by_size_false(self):
        resampling_dataset = ResamplingDataset(
            self.dataset,
            self.weights,
            size_ratio=self.size_ratio,
            batch_by_size=False,
            seed=0,
        )

        results = self._test_common(resampling_dataset, iters=1000)

        # For batch_by_size = False, the batches should be returned in
        # arbitrary order of size.
        assert not results["ordered_by_size"]

        # Allow tolerance in distribution error of 2%.
        assert results["max_distribution_diff"] < 0.02

    def test_resampling_dataset_batch_by_size_true(self):
        resampling_dataset = ResamplingDataset(
            self.dataset,
            self.weights,
            size_ratio=self.size_ratio,
            batch_by_size=True,
            seed=0,
        )

        results = self._test_common(resampling_dataset, iters=1000)

        # For batch_by_size = True, the batches should be returned in
        # increasing order of size.
        assert results["ordered_by_size"]

        # Allow tolerance in distribution error of 2%.
        assert results["max_distribution_diff"] < 0.02


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_resampling_dataset.py
import os import shutil import tempfile import unittest from fairseq import options from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.data.data_utils import raise_if_valid_subsets_unintentionally_ignored from .utils import create_dummy_data, preprocess_lm_data, train_language_model def make_lm_config( data_dir=None, extra_flags=None, task="language_modeling", arch="transformer_lm_gpt2_tiny", ): task_args = [task] if data_dir is not None: task_args += [data_dir] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", *task_args, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", ] + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) return cfg def write_empty_file(path): with open(path, "w"): pass assert os.path.exists(path) class TestValidSubsetsErrors(unittest.TestCase): """Test various filesystem, clarg combinations and ensure that error raising happens as expected""" def _test_case(self, paths, extra_flags): with tempfile.TemporaryDirectory() as data_dir: [ write_empty_file(os.path.join(data_dir, f"{p}.bin")) for p in paths + ["train"] ] cfg = make_lm_config(data_dir, extra_flags=extra_flags) raise_if_valid_subsets_unintentionally_ignored(cfg) def test_default_raises(self): with self.assertRaises(ValueError): self._test_case(["valid", "valid1"], []) with self.assertRaises(ValueError): self._test_case( ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] ) def partially_specified_valid_subsets(self): with self.assertRaises(ValueError): self._test_case( ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] ) # Fix with ignore unused self._test_case( ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1", "--ignore-unused-valid-subsets"], ) def test_legal_configs(self): self._test_case(["valid"], []) self._test_case(["valid", "valid1"], ["--ignore-unused-valid-subsets"]) self._test_case(["valid", "valid1"], ["--combine-val"]) self._test_case(["valid", "valid1"], ["--valid-subset", "valid,valid1"]) self._test_case(["valid", "valid1"], ["--valid-subset", "valid1"]) self._test_case( ["valid", "valid1"], ["--combine-val", "--ignore-unused-valid-subsets"] ) self._test_case( ["valid1"], ["--valid-subset", "valid1"] ) # valid.bin doesn't need to be ignored. 
def test_disable_validation(self): self._test_case([], ["--disable-validation"]) self._test_case(["valid", "valid1"], ["--disable-validation"]) def test_dummy_task(self): cfg = make_lm_config(task="dummy_lm") raise_if_valid_subsets_unintentionally_ignored(cfg) def test_masked_dummy_task(self): cfg = make_lm_config(task="dummy_masked_lm") raise_if_valid_subsets_unintentionally_ignored(cfg) class TestCombineValidSubsets(unittest.TestCase): def _train(self, extra_flags): with self.assertLogs() as logs: with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir, num_examples=20) preprocess_lm_data(data_dir) shutil.copyfile(f"{data_dir}/valid.bin", f"{data_dir}/valid1.bin") shutil.copyfile(f"{data_dir}/valid.idx", f"{data_dir}/valid1.idx") train_language_model( data_dir, "transformer_lm", ["--max-update", "0", "--log-format", "json"] + extra_flags, run_validation=False, ) return [x.message for x in logs.records] def test_combined(self): flags = ["--combine-valid-subsets", "--required-batch-size-multiple", "1"] logs = self._train(flags) assert any(["valid1" in x for x in logs]) # loaded 100 examples from valid1 assert not any(["valid1_ppl" in x for x in logs]) # metrics are combined def test_subsets(self): flags = [ "--valid-subset", "valid,valid1", "--required-batch-size-multiple", "1", ] logs = self._train(flags) assert any(["valid_ppl" in x for x in logs]) # loaded 100 examples from valid1 assert any(["valid1_ppl" in x for x in logs]) # metrics are combined
EXA-1-master
exa/libraries/fairseq/tests/test_valid_subset_checks.py
EXA-1-master
exa/libraries/fairseq/tests/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import tests.utils as test_utils import torch from fairseq.data import ( BacktranslationDataset, LanguagePairDataset, TransformEosDataset, ) from fairseq.sequence_generator import SequenceGenerator class TestBacktranslationDataset(unittest.TestCase): def setUp(self): ( self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model, ) = test_utils.sequence_generator_setup() dummy_src_samples = self.src_tokens self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) self.cuda = torch.cuda.is_available() def _backtranslation_dataset_helper( self, remove_eos_from_input_src, remove_eos_from_output_src, ): tgt_dataset = LanguagePairDataset( src=self.tgt_dataset, src_sizes=self.tgt_dataset.sizes, src_dict=self.tgt_dict, tgt=None, tgt_sizes=None, tgt_dict=None, ) generator = SequenceGenerator( [self.model], tgt_dict=self.tgt_dict, max_len_a=0, max_len_b=200, beam_size=2, unk_penalty=0, ) backtranslation_dataset = BacktranslationDataset( tgt_dataset=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # remove eos from the input src remove_eos_from_src=remove_eos_from_input_src, ), src_dict=self.tgt_dict, backtranslation_fn=( lambda sample: generator.generate([self.model], sample) ), output_collater=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # if we remove eos from the input src, then we need to add it # back to the output tgt append_eos_to_tgt=remove_eos_from_input_src, remove_eos_from_src=remove_eos_from_output_src, ).collater, cuda=self.cuda, ) dataloader = torch.utils.data.DataLoader( backtranslation_dataset, batch_size=2, collate_fn=backtranslation_dataset.collater, ) backtranslation_batch_result = next(iter(dataloader)) eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 # Note that we sort by src_lengths and add left padding, so actually # ids will look like: [1, 0] expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) if remove_eos_from_output_src: expected_src = expected_src[:, :-1] expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) generated_src = backtranslation_batch_result["net_input"]["src_tokens"] tgt_tokens = backtranslation_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_backtranslation_dataset_no_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=True, ) def test_backtranslation_dataset_with_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=False, ) def test_backtranslation_dataset_no_eos_in_input_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=True, remove_eos_from_output_src=False, ) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_backtranslation_dataset.py
import contextlib import tempfile import unittest from io import StringIO import numpy as np from tests.utils import create_dummy_data, preprocess_lm_data, train_language_model try: from pyarrow import plasma from fairseq.data.plasma_utils import PlasmaStore, PlasmaView PYARROW_AVAILABLE = True except ImportError: PYARROW_AVAILABLE = False dummy_path = "dummy" @unittest.skipUnless(PYARROW_AVAILABLE, "") class TestPlasmaView(unittest.TestCase): def setUp(self) -> None: self.tmp_file = tempfile.NamedTemporaryFile() # noqa: P201 self.path = self.tmp_file.name self.server = PlasmaStore.start(path=self.path, nbytes=10000) self.client = plasma.connect(self.path, num_retries=10) def tearDown(self) -> None: self.client.disconnect() self.tmp_file.close() self.server.kill() def test_two_servers_do_not_share_object_id_space(self): data_server_1 = np.array([0, 1]) data_server_2 = np.array([2, 3]) server_2_path = self.path with tempfile.NamedTemporaryFile() as server_1_path: server = PlasmaStore.start(path=server_1_path.name, nbytes=10000) arr1 = PlasmaView( data_server_1, dummy_path, 1, plasma_path=server_1_path.name ) assert len(arr1.client.list()) == 1 assert (arr1.array == data_server_1).all() arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=server_2_path) assert (arr2.array == data_server_2).all() assert (arr1.array == data_server_1).all() server.kill() def test_hash_collision(self): data_server_1 = np.array([0, 1]) data_server_2 = np.array([2, 3]) arr1 = PlasmaView(data_server_1, dummy_path, 1, plasma_path=self.path) assert len(arr1.client.list()) == 1 arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=self.path) assert len(arr1.client.list()) == 1 assert len(arr2.client.list()) == 1 assert (arr2.array == data_server_1).all() # New hash key based on tuples arr3 = PlasmaView( data_server_2, dummy_path, (1, 12312312312, None), plasma_path=self.path ) assert ( len(arr2.client.list()) == 2 ), "No new object was created by using a novel hash key" assert ( arr3.object_id in arr2.client.list() ), "No new object was created by using a novel hash key" assert ( arr3.object_id in arr3.client.list() ), "No new object was created by using a novel hash key" del arr3, arr2, arr1 @staticmethod def _assert_view_equal(pv1, pv2): np.testing.assert_array_equal(pv1.array, pv2.array) def test_putting_same_array_twice(self): data = np.array([4, 4, 4]) arr1 = PlasmaView(data, dummy_path, 1, plasma_path=self.path) assert len(self.client.list()) == 1 arr1b = PlasmaView( data, dummy_path, 1, plasma_path=self.path ) # should not change contents of store arr1c = PlasmaView( None, dummy_path, 1, plasma_path=self.path ) # should not change contents of store assert len(self.client.list()) == 1 self._assert_view_equal(arr1, arr1b) self._assert_view_equal(arr1, arr1c) PlasmaView( data, dummy_path, 2, plasma_path=self.path ) # new object id, adds new entry assert len(self.client.list()) == 2 new_client = plasma.connect(self.path) assert len(new_client.list()) == 2 # new client can access same objects assert isinstance(arr1.object_id, plasma.ObjectID) del arr1b del arr1c def test_plasma_store_full_raises(self): with tempfile.NamedTemporaryFile() as new_path: server = PlasmaStore.start(path=new_path.name, nbytes=10000) with self.assertRaises(plasma.PlasmaStoreFull): # 2000 floats is more than 2000 bytes PlasmaView( np.random.rand(10000, 1), dummy_path, 1, plasma_path=new_path.name ) server.kill() def test_object_id_overflow(self): PlasmaView.get_object_id("", 2**21) def test_training_lm_plasma(self): with 
contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", ["--use-plasma-view", "--plasma-path", self.path], run_validation=True, )
EXA-1-master
exa/libraries/fairseq/tests/test_plasma_utils.py
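The tests above exercise the Plasma-backed array sharing behind --use-plasma-view. A minimal usage sketch based only on the calls those tests make; the socket path and array contents below are illustrative:

import tempfile

import numpy as np
from pyarrow import plasma

from fairseq.data.plasma_utils import PlasmaStore, PlasmaView

with tempfile.NamedTemporaryFile() as sock:
    # Start a plasma store bound to a unix socket with a small byte budget.
    server = PlasmaStore.start(path=sock.name, nbytes=10000)
    try:
        data = np.arange(4)
        # The (split_path, hash_data) pair keys the object; reusing the same key
        # reads back the already-stored array instead of writing a new one.
        view = PlasmaView(data, "dummy", 1, plasma_path=sock.name)
        assert (view.array == data).all()

        client = plasma.connect(sock.name, num_retries=10)
        assert len(client.list()) == 1  # exactly one object in the store
        client.disconnect()
    finally:
        server.kill()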
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import json import logging import os import random import sys import tempfile import unittest from packaging import version from io import StringIO from typing import Dict, List import torch from fairseq import options from fairseq_cli import eval_lm, train from tests.utils import ( create_dummy_data, create_laser_data_and_config_json, generate_main, preprocess_lm_data, preprocess_summarization_data, preprocess_translation_data, train_language_model, train_translation_model, ) try: import transformers # noqa has_hf_transformers = True except ImportError: has_hf_transformers = False class TestTranslation(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main(data_dir) def test_raw(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--dataset-impl", "raw"]) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"] ) generate_main(data_dir, ["--dataset-impl", "raw"]) def test_update_freq(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_update_freq") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"] ) generate_main(data_dir) def test_max_positions(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_max_positions") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) with self.assertRaises(Exception) as context: train_translation_model( data_dir, "fconv_iwslt_de_en", ["--max-target-positions", "5"], ) self.assertTrue( "skip this example with --skip-invalid-size-inputs-valid-test" in str(context.exception) ) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--max-target-positions", "5", "--skip-invalid-size-inputs-valid-test", ], ) with self.assertRaises(Exception) as context: generate_main(data_dir) generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"]) def test_generation(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_sampling") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main( data_dir, [ "--sampling", "--temperature", "2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topk", "3", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topp", "0.2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--diversity-rate", "0.5", "--beam", "6", ], ) with self.assertRaises(ValueError): generate_main( data_dir, [ "--diverse-beam-groups", "4", "--match-source-len", ], ) generate_main(data_dir, ["--prefix-size", "2"]) generate_main(data_dir, ["--retain-dropout"]) def test_eval_bleu(self): with contextlib.redirect_stdout(StringIO()): with 
tempfile.TemporaryDirectory("test_eval_bleu") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--eval-bleu", "--eval-bleu-print-samples", "--eval-bleu-remove-bpe", "--eval-bleu-detok", "space", "--eval-bleu-args", '{"beam": 4, "min_len": 10}', ], ) def test_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm_wiseman_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", ], ) generate_main(data_dir) def test_lstm_bidirectional(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm", [ "--encoder-layers", "2", "--encoder-bidirectional", "--encoder-hidden-size", "16", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--decoder-layers", "2", ], ) generate_main(data_dir) def test_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], run_validation=True, ) generate_main(data_dir) def test_multilingual_transformer(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_multilingual_transformer_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch="multilingual_transformer", task="multilingual_translation", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "multilingual_translation", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) @unittest.skipIf( sys.platform.lower() == "darwin", "skip latent depth test on MacOS" ) def test_multilingual_translation_latent_depth(self): # test with latent depth in encoder, decoder, or both encoder_latent_layer = [[], ["--encoder-latent-layer"]] decoder_latent_layer = [[], ["--decoder-latent-layer"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_latent_layer)): for j in range(len(decoder_latent_layer)): if i == 0 and j == 0: continue enc_ll_flag = encoder_latent_layer[i] dec_ll_flag = decoder_latent_layer[j] with tempfile.TemporaryDirectory( f"test_multilingual_translation_latent_depth_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) 
preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) train_translation_model( data_dir, arch="latent_multilingual_transformer", task="multilingual_translation_latent_depth", extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--share-encoders", "--share-decoders", "--sparsity-weight", "0.1", ] + enc_ll_flag + dec_ll_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", ] + enc_ll_flag + dec_ll_flag, ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--task", "multilingual_translation_latent_depth", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ll_flag + dec_ll_flag, ) def test_translation_multi_simple_epoch(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_translation_multi_simple_epoch_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_no_vepoch(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_dicts(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with 
tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_src_tgt_dict_spec(self): # test the specification of explicit --src-dict and --tgt-dict with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--source-dict", f"{data_dir}/dict.in.txt", "--target-dict", f"{data_dir}/dict.out.txt", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_transformer_cross_self_attention(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_cross_self_attention" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-embed-dim", "8", "--no-cross-attention", "--cross-self-attention", ], run_validation=True, ) generate_main(data_dir, extra_flags=[]) @unittest.skipIf( version.parse(torch.__version__) > version.parse("1.8"), "skip for latest torch versions", ) def test_transformer_pointer_generator(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_pointer_generator" ) as data_dir: create_dummy_data(data_dir) preprocess_summarization_data(data_dir) train_translation_model( data_dir, "transformer_pointer_generator", extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--alignment-layer", "-1", "--alignment-heads", "1", "--source-position-markers", "0", ], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) def test_lightconv(self): with contextlib.redirect_stdout(StringIO()): 
with tempfile.TemporaryDirectory("test_lightconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ "--encoder-conv-type", "lightweight", "--decoder-conv-type", "lightweight", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_dynamicconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ "--encoder-conv-type", "dynamic", "--decoder-conv-type", "dynamic", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_cmlm_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "cmlm_transformer", [ "--apply-bert-init", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "0", "--iter-decode-eos-penalty", "0", "--print-step", ], ) # def test_nat_crf_transformer(self): # with contextlib.redirect_stdout(StringIO()): # with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir: # create_dummy_data(data_dir) # preprocess_translation_data(data_dir, ['--joined-dictionary']) # train_translation_model(data_dir, 'nacrf_transformer', [ # '--apply-bert-init', '--criterion', # 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', # '--length-loss-factor', '0.1', # '--word-ins-loss-factor', '0.5', # '--crf-lowrank-approx', '1', # '--crf-beam-approx', '1' # ], task='translation_lev') # generate_main(data_dir, [ # '--task', 'translation_lev', # '--iter-decode-max-iter', '0', # '--iter-decode-eos-penalty', '0', # '--print-step', # ]) def test_iterative_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_iterative_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "iterative_nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--stochastic-approx", "--dae-ratio", "0.5", "--train-step", "3", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_insertion_transformer(self): with contextlib.redirect_stdout(StringIO()): with 
tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "insertion_transformer", [ "--apply-bert-init", "--criterion", "nat_loss", "--noise", "random_mask", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_mixture_of_experts(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_moe") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main( data_dir, [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--gen-expert", "0", ], ) def test_alignment(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", ], run_validation=True, ) generate_main(data_dir) def test_laser_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_laser_lstm") as data_dir: laser_config_file = create_laser_data_and_config_json(data_dir) train_translation_model( laser_config_file.name, "laser_lstm", [ "--user-dir", "examples/laser/laser_src", "--weighting-alpha", "0.3", "--encoder-bidirectional", "--encoder-hidden-size", "512", "--encoder-layers", "5", "--decoder-layers", "1", "--encoder-embed-dim", "320", "--decoder-embed-dim", "320", "--decoder-lang-embed-dim", "32", "--save-dir", data_dir, "--disable-validation", ], task="laser", lang_flags=[], ) def test_laser_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_laser_transformer") as data_dir: laser_config_file = create_laser_data_and_config_json(data_dir) train_translation_model( laser_config_file.name, "laser_transformer", [ "--user-dir", "examples/laser/laser_src", "--weighting-alpha", "0.3", "--encoder-embed-dim", "320", "--decoder-embed-dim", "320", "--decoder-lang-embed-dim", "32", "--save-dir", data_dir, "--disable-validation", ], task="laser", lang_flags=[], ) def test_alignment_full_context(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", "--full-context-alignment", ], run_validation=True, ) 
generate_main(data_dir) def test_transformer_layerdrop(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "3", "--decoder-layers", "3", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01", ], ) generate_main(data_dir) generate_main( data_dir, [ "--model-overrides", "{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}", ], ) class TestStories(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_self_att_wp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) config = [ "--encoder-layers", "[(128, 3)] * 2", "--decoder-layers", "[(128, 3)] * 2", "--decoder-attention", "True", "--encoder-attention", "False", "--gated-attention", "True", "--self-attention", "True", "--project-input", "True", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--multihead-self-attention-nheads", "2", ] train_translation_model(data_dir, "fconv_self_att_wp", config) generate_main(data_dir) # fusion model os.rename( os.path.join(data_dir, "checkpoint_last.pt"), os.path.join(data_dir, "pretrained.pt"), ) config.extend( [ "--pretrained", "True", "--pretrained-checkpoint", os.path.join(data_dir, "pretrained.pt"), "--save-dir", os.path.join(data_dir, "fusion_model"), ] ) train_translation_model(data_dir, "fconv_self_att_wp", config) class TestLanguageModeling(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "fconv_lm", [ "--decoder-layers", "[(850, 3)] * 2 + [(1024,4)]", "--decoder-embed-dim", "280", "--optimizer", "nag", "--lr", "0.1", ], ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_transformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", ["--add-bos-token", "--nval", "1"], run_validation=True, ) eval_lm_main(data_dir) eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_normformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", [ "--add-bos-token", "--nval", "1", "--scale-fc", "--scale-heads", "--scale-attn", "--scale-fc", ], run_validation=True, ) eval_lm_main(data_dir) eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def 
test_transformer_lm_with_adaptive_softmax(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_lm_with_adaptive_softmax" ) as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", [ "--add-bos-token", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", ], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lightconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lightconv_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm_residuals(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token", "--residuals"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) @unittest.skipIf(not has_hf_transformers, "skip test if transformers is missing") def test_transformer_xl_bptt_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) task_flags = [ "--user-dir", "examples/truncated_bptt", "--task", "truncated_bptt_lm", "--batch-size", "2", "--tokens-per-sample", "50", ] train_language_model( data_dir=data_dir, arch="transformer_xl", extra_flags=task_flags + [ "--n-layer", "2", ], task="truncated_bptt_lm", run_validation=True, extra_valid_flags=task_flags, ) eval_lm_main(data_dir, extra_flags=task_flags) # Train with activation offloading train_language_model( data_dir=data_dir, arch="transformer_xl", extra_flags=task_flags + [ "--n-layer", "2", "--offload-activations", ], task="truncated_bptt_lm", run_validation=True, extra_valid_flags=task_flags, ) class TestMaskedLanguageModel(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_legacy_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model(data_dir, "masked_lm") def test_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"] ) def test_roberta_sentence_prediction(self): num_classes = 
3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes) def test_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_linformer_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "linformer_roberta_base", extra_flags=[ "--user-dir", "examples/linformer/linformer_src", "--encoder-layers", "2", ], ) def test_linformer_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=["--user-dir", "examples/linformer/linformer_src"], ) def test_linformer_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_linformer_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def test_linformer_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_linformer_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model( data_dir, arch="masked_lm", extra_args=("--encoder-learned-pos",) if learned_pos_emb else (), ) with tempfile.TemporaryDirectory( 
"test_mlm_translation" ) as translation_dir: create_dummy_data(translation_dir) preprocess_translation_data( translation_dir, extra_flags=["--joined-dictionary"] ) # Train transformer with data_dir/checkpoint_last.pt train_translation_model( translation_dir, arch="transformer_from_pretrained_xlm", extra_flags=[ "--decoder-layers", "1", "--decoder-embed-dim", "32", "--decoder-attention-heads", "1", "--decoder-ffn-embed-dim", "32", "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", "--pretrained-xlm-checkpoint", "{}/checkpoint_last.pt".format(data_dir), "--activation-fn", "gelu", "--max-source-positions", "500", "--max-target-positions", "500", ] + ( ["--encoder-learned-pos", "--decoder-learned-pos"] if learned_pos_emb else [] ) + (["--init-encoder-only"] if encoder_only else []), task="translation_from_pretrained_xlm", ) def test_pretrained_masked_lm_for_translation_learned_pos_emb(self): self._test_pretrained_masked_lm_for_translation(True, False) def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self): self._test_pretrained_masked_lm_for_translation(False, False) def test_pretrained_masked_lm_for_translation_encoder_only(self): self._test_pretrained_masked_lm_for_translation(True, True) def test_r4f_roberta(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=[ "--user-dir", "examples/rxf/rxf_src", "--criterion", "sentence_prediction_r3f", "--spectral-norm-classification-head", ], ) def train_legacy_masked_language_model(data_dir, arch, extra_args=()): train_parser = options.get_training_parser() # TODO: langs should be in and out right? 
train_args = options.parse_args_and_arch( train_parser, [ "--task", "cross_lingual_lm", data_dir, "--arch", arch, # Optimizer args "--optimizer", "adam", "--lr-scheduler", "reduce_lr_on_plateau", "--lr-shrink", "0.5", "--lr", "0.0001", "--stop-min-lr", "1e-09", # dropout, attention args "--dropout", "0.1", "--attention-dropout", "0.1", # MLM args "--criterion", "legacy_masked_lm_loss", "--masked-lm-only", "--monolingual-langs", "in,out", "--num-segment", "5", # Transformer args: use a small transformer model for fast training "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", # Other training args "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--dataset-impl", "raw", "--num-workers", "0", ] + list(extra_args), ) train.main(train_args) class TestOptimizers(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_optimizers(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_optimizers") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"] last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt") for optimizer in optimizers: if os.path.exists(last_checkpoint): os.remove(last_checkpoint) train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", optimizer, ], ) generate_main(data_dir) def read_last_log_entry( logs: List[logging.LogRecord], logger_name: str ) -> Dict[str, float]: for x in reversed(logs): if x.name == logger_name: return json.loads(x.message) raise ValueError(f"No entries from {logger_name} found in captured logs") class TestActivationCheckpointing(unittest.TestCase): base_flags = [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--restore-file", "x.pt", "--log-format", "json", "--log-interval", "1", "--max-update", "2", ] def _train(self, data_dir, extra_flags): with self.assertLogs() as logs: train_translation_model( data_dir, "transformer_iwslt_de_en", self.base_flags + extra_flags, run_validation=True, extra_valid_flags=["--log-format", "json"], ) return logs.records def test_activation_offloading_does_not_change_metrics(self): """Neither ----checkpoint-activations nor --offload-activations should change loss""" with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: with self.assertLogs(): create_dummy_data(data_dir, num_examples=20) preprocess_translation_data(data_dir) offload_logs = self._train(data_dir, ["--offload-activations"]) baseline_logs = self._train(data_dir, []) assert len(baseline_logs) == len(offload_logs) baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") offload_valid_stats = read_last_log_entry(offload_logs, "valid") baseline_train_stats = read_last_log_entry(baseline_logs, "train") offload_train_stats = read_last_log_entry(offload_logs, "train") assert ( baseline_train_stats["train_loss"] == offload_train_stats["train_loss"] ) assert ( baseline_valid_stats["valid_loss"] == offload_valid_stats["valid_loss"] ) def 
test_activation_checkpointing_does_not_change_metrics(self): """--checkpoint-activations should not change loss""" with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: with self.assertLogs(): create_dummy_data(data_dir, num_examples=20) preprocess_translation_data(data_dir) ckpt_logs = self._train(data_dir, ["--checkpoint-activations"]) baseline_logs = self._train(data_dir, []) assert len(baseline_logs) == len(ckpt_logs) baseline_train_stats = read_last_log_entry(baseline_logs, "train") ckpt_train_stats = read_last_log_entry(ckpt_logs, "train") assert baseline_train_stats["train_loss"] == ckpt_train_stats["train_loss"] baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") ckpt_valid_stats = read_last_log_entry(ckpt_logs, "valid") assert baseline_valid_stats["valid_loss"] == ckpt_valid_stats["valid_loss"] def create_dummy_roberta_head_data( data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False ): input_dir = "input0" def _create_dummy_data(filename): random_data = torch.rand(num_examples * maxlen) input_data = 97 + torch.floor(26 * random_data).int() if regression: output_data = torch.rand((num_examples, num_classes)) else: output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int() with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in: label_filename = filename + ".label" if regression else filename + ".out" with open(os.path.join(data_dir, "label", label_filename), "w") as f_out: offset = 0 for i in range(num_examples): # write example input ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, input_data[offset : offset + ex_len])) print(ex_str, file=f_in) # write example label if regression: class_str = " ".join(map(str, output_data[i].numpy())) print(class_str, file=f_out) else: class_str = "class{}".format(output_data[i]) print(class_str, file=f_out) offset += ex_len os.mkdir(os.path.join(data_dir, input_dir)) os.mkdir(os.path.join(data_dir, "label")) _create_dummy_data("train") _create_dummy_data("valid") _create_dummy_data("test") def train_masked_lm(data_dir, arch, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "masked_lm", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "masked_lm", "--batch-size", "500", "--required-batch-size-multiple", "1", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "sentence_prediction", data_dir, "--arch", arch, "--encoder-layers", "2", "--num-classes", str(num_classes), "--optimizer", "adam", "--lr", "0.0001", "--criterion", "sentence_prediction", "--max-tokens", "500", "--max-positions", "500", "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def eval_lm_main(data_dir, extra_flags=None): eval_lm_parser = options.get_eval_lm_parser() eval_lm_args = options.parse_args_and_arch( eval_lm_parser, [ data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) 
eval_lm.main(eval_lm_args) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_binaries.py
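Each case in the file above follows the same recipe: build a throwaway dataset, binarize it, train a tiny model for one epoch, then decode. A condensed sketch of that recipe using the helpers from tests/utils.py; the architecture and flags are copied from the transformer case and are illustrative only:

import contextlib
import tempfile
from io import StringIO

from tests.utils import (
    create_dummy_data,
    generate_main,
    preprocess_translation_data,
    train_translation_model,
)

with contextlib.redirect_stdout(StringIO()):  # keep fairseq's console output quiet
    with tempfile.TemporaryDirectory("example_transformer") as data_dir:
        create_dummy_data(data_dir)            # random "in"/"out" parallel corpora
        preprocess_translation_data(data_dir)  # fairseq-preprocess into data_dir
        train_translation_model(
            data_dir,
            "transformer_iwslt_de_en",
            [
                "--encoder-layers", "2",
                "--decoder-layers", "2",
                "--encoder-embed-dim", "8",
                "--decoder-embed-dim", "8",
            ],
            run_validation=True,
        )
        generate_main(data_dir)                # batch generation plus interactive mode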
import unittest

import torch
from fairseq.modules import RelPositionalEncoding
import numpy as np


class TestRelPositionalEncoding(unittest.TestCase):
    def setUp(self) -> None:
        self.T = 3
        self.B = 1
        self.C = 2
        torch.manual_seed(0)
        self.sample = torch.randn(self.T, self.B, self.C)  # TBC
        self.rel_pos_enc = RelPositionalEncoding(max_len=4, d_model=self.C)

    def test_extend_pe(self):
        inp = self.sample.transpose(0, 1)
        self.rel_pos_enc.extend_pe(inp)
        expected_pe = torch.tensor(
            [
                [
                    [0.1411, -0.9900],
                    [0.9093, -0.4161],
                    [0.8415, 0.5403],
                    [0.0000, 1.0000],
                    [-0.8415, 0.5403],
                    [-0.9093, -0.4161],
                    [-0.1411, -0.9900],
                ]
            ]
        )
        self.assertTrue(
            np.allclose(
                expected_pe.cpu().detach().numpy(),
                self.rel_pos_enc.pe.cpu().detach().numpy(),
                atol=1e-4,
            )
        )

    def test_forward(self):
        pos_enc = self.rel_pos_enc(self.sample)
        expected_pos_enc = torch.tensor(
            [
                [[0.9093, -0.4161]],
                [[0.8415, 0.5403]],
                [[0.0000, 1.0000]],
                [[-0.8415, 0.5403]],
                [[-0.9093, -0.4161]],
            ]
        )
        self.assertTrue(
            np.allclose(
                pos_enc.cpu().detach().numpy(),
                expected_pos_enc.cpu().detach().numpy(),
                atol=1e-4,
            )
        )


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_positional_encoding.py
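A minimal sketch of the module exercised above; the input sizes mirror the fixture and the output shape is read off the expected tensor in test_forward:

import torch
from fairseq.modules import RelPositionalEncoding

T, B, C = 3, 1, 2                  # time, batch, channels (TBC layout)
x = torch.randn(T, B, C)

rel_pos_enc = RelPositionalEncoding(max_len=4, d_model=C)
pos_emb = rel_pos_enc(x)           # relative position embeddings for x
print(pos_emb.shape)               # torch.Size([5, 1, 2]), i.e. 2*T - 1 relative offsets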
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import List import torch from fairseq.token_generation_constraints import ( ConstraintNode, OrderedConstraintState, UnorderedConstraintState, pack_constraints, ) def tensorize(constraints: List[List[int]]) -> torch.Tensor: return [torch.tensor(x) for x in constraints] class TestHelperRoutines(unittest.TestCase): def setUp(self): self.examples = [ ([[]], torch.tensor([[0]])), ([[], []], torch.tensor([[0], [0]])), ([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])), ( [ [ torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7]), ], [], [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])], ], torch.tensor( [ [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0], ] ), ), ] def test_packing(self): """Ensures the list of lists of tensors gets packed correctly.""" for batch_constraints, expected_tensor in self.examples: packed = pack_constraints(batch_constraints) assert torch.equal(packed, expected_tensor) class TestUnorderedConstraintState(unittest.TestCase): def setUp(self): # Tuples of (contraint set, expected printed graph, token counts per node) self.examples = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))", # noqa {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1}, ), ([], "[None].False#0", {}), (tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}), ( tensorize([[100000, 1, 2, 3, 4, 5]]), "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))", {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, ), ( tensorize([[1, 2], [1, 2]]), "([None].False#2 ([1].False#2 [2].True#2))", {1: 2, 2: 2}, ), ( tensorize([[1, 2], [3, 4]]), "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))", {1: 1, 2: 1, 3: 1, 4: 1}, ), ] self.sequences = [ ( self.examples[0][0], [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 2, 94], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 3, 999, 1, 4], {"bank": 4, "num_completed": 2, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 3, 999, 1, 4, 999], {"bank": 4, "num_completed": 2, "finished": False, "is_root": True}, ), ( self.examples[0][0], [4, 5, 6, 8], {"bank": 2, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5] # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]], [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( self.examples[0][0], [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": True}, ), ( tensorize([[1], [2, 3]]), # Should not be able to get credit for entering 1 a second time [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[4][0], [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( self.examples[4][0], [1, 2, 1, 2, 
1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ( self.examples[5][0], [1, 2, 3, 4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ] def test_graphs(self): """ Test whether unordered graph systems are created correctly. """ for example in self.examples: constraints, expected, gold_counts = example c = ConstraintNode.create(constraints) assert ( ConstraintNode.print_graph(c) == expected ), f"got {ConstraintNode.print_graph(c)}, expected {expected}" assert ( c.token_counts() == gold_counts ), f"{c} got {c.token_counts()} wanted {gold_counts}" def test_next_tokens(self): """ Tests that the set of next tokens is correct. """ for example in self.examples: constraints, expected, gold_counts = example root = ConstraintNode.create(constraints) root_tokens = set(root.children.keys()) for sequence in constraints: state = UnorderedConstraintState(root) for token in sequence: all_tokens = root_tokens.union(state.node.children.keys()) assert ( all_tokens == state.next_tokens() ), f"ALL {all_tokens} NEXT {state.next_tokens()}" state = state.advance(token) def test_sequences(self): for constraints, tokens, expected in self.sequences: state = UnorderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" class TestOrderedConstraintState(unittest.TestCase): def setUp(self): self.sequences = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 94], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 3, 999, 1, 4], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 999, 999], {"bank": 3, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 77, 1, 3, 1], {"bank": 6, "num_completed": 2, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1], [2, 3]]), [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2, 1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [3, 4]]), [1, 2, 3, 4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ] def test_sequences(self): for i, (constraints, tokens, expected) in enumerate(self.sequences): state = OrderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: 
state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_constraints.py
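A small sketch of the constraint-state API driven by the sequences above: pack per-sentence constraints into a tensor, advance a state token by token, and read off its bookkeeping attributes. The token ids here are illustrative:

import torch

from fairseq.token_generation_constraints import (
    UnorderedConstraintState,
    pack_constraints,
)

# One sentence with two constraints: the phrase [3, 4] and the single token [5].
batch_constraints = [[torch.tensor([3, 4]), torch.tensor([5])]]
packed = pack_constraints(batch_constraints)   # one packed row per sentence

state = UnorderedConstraintState.create(packed[0])
for token in [3, 4, 9, 5]:                     # 9 is not part of any constraint
    state = state.advance(token)

# Both constraints have been met at this point.
print(state.num_completed, state.finished)     # 2 True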
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import tests.utils as test_utils import torch from fairseq.criterions.cross_entropy import CrossEntropyCriterion from fairseq.criterions.label_smoothed_cross_entropy import ( LabelSmoothedCrossEntropyCriterion, ) class TestLabelSmoothing(unittest.TestCase): def setUp(self): # build dictionary self.d = test_utils.dummy_dictionary(3) vocab = len(self.d) self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens self.assertEqual(self.d.pad(), 1) self.assertEqual(self.d.eos(), 2) self.assertEqual(self.d.unk(), 3) pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841 # build dataset self.data = [ # the first batch item has padding { "source": torch.LongTensor([w1, eos]), "target": torch.LongTensor([w1, eos]), }, { "source": torch.LongTensor([w1, eos]), "target": torch.LongTensor([w1, w1, eos]), }, ] self.sample = next(test_utils.dummy_dataloader(self.data)) # build model self.args = argparse.Namespace() self.args.sentence_avg = False self.args.report_accuracy = False self.args.probs = ( torch.FloatTensor( [ # pad eos unk w1 w2 w3 [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05], [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10], [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15], ] ) .unsqueeze(0) .expand(2, 3, 7) ) # add batch dimension self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d) self.model = self.task.build_model(self.args) def test_nll_loss(self): self.args.label_smoothing = 0.1 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( self.args, self.task ) nll_loss, nll_sample_size, nll_logging_output = nll_crit( self.model, self.sample ) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( self.model, self.sample ) self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6) self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6) def test_padding(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample) def get_one_no_padding(idx): # create a new sample with just a single batch item so that there's # no padding sample1 = next(test_utils.dummy_dataloader([self.data[idx]])) args1 = copy.copy(self.args) args1.probs = args1.probs[idx, :, :].unsqueeze(0) model1 = self.task.build_model(args1) loss1, _, _ = crit(model1, sample1) return loss1 loss1 = get_one_no_padding(0) loss2 = get_one_no_padding(1) self.assertAlmostEqual(loss, loss1 + loss2) def test_reduction(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample, reduce=True) unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False) self.assertAlmostEqual(loss, unreduced_loss.sum()) def test_zero_eps(self): self.args.label_smoothing = 0.0 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( self.args, self.task ) nll_loss, nll_sample_size, nll_logging_output = nll_crit( self.model, self.sample ) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( self.model, self.sample ) self.assertAlmostEqual(nll_loss, smooth_loss) def assertAlmostEqual(self, t1, t2): 
self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-6) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_label_smoothing.py
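A condensed, standalone version of the fixture above, kept to a single unpadded example. With label_smoothing set to 0.0 the smoothed criterion should reproduce plain cross-entropy, which is what test_zero_eps asserts; the probability table is copied from the fixture and is otherwise arbitrary:

import argparse

import torch

import tests.utils as test_utils
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import (
    LabelSmoothedCrossEntropyCriterion,
)

d = test_utils.dummy_dictionary(3)             # 4 special symbols + 3 tokens
w1, eos = 4, 2
sample = next(
    test_utils.dummy_dataloader(
        [{"source": torch.LongTensor([w1, eos]), "target": torch.LongTensor([w1, eos])}]
    )
)

args = argparse.Namespace(
    sentence_avg=False,
    report_accuracy=False,
    label_smoothing=0.0,
    # One sentence, up to three decoding steps, vocabulary of 7.
    probs=torch.FloatTensor(
        [
            [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
            [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
            [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
        ]
    ).unsqueeze(0),
)
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)

nll_loss, _, _ = CrossEntropyCriterion.build_criterion(args, task)(model, sample)
smooth_loss, _, _ = LabelSmoothedCrossEntropyCriterion.build_criterion(args, task)(
    model, sample
)
print(float(nll_loss), float(smooth_loss))     # should agree to within ~1e-6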
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import json import os import random import shutil import string import sys import typing as tp from io import StringIO import torch import torch.nn.functional as F import fairseq.distributed.utils as distributed_utils from fairseq import options, utils from fairseq.data import Dictionary from fairseq.data.language_pair_dataset import collate from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.tasks import LegacyFairseqTask from fairseq_cli import generate, interactive, preprocess, train, validate def dummy_dictionary(vocab_size, prefix="token_"): d = Dictionary() for i in range(vocab_size): token = prefix + str(i) d.add_symbol(token) d.finalize(padding_factor=1) # don't add extra padding symbols return d def dummy_dataloader( samples, padding_idx=1, eos_idx=2, batch_size=None, ): if batch_size is None: batch_size = len(samples) # add any missing data to samples for i, sample in enumerate(samples): if "id" not in sample: sample["id"] = i # create dataloader dataset = TestDataset(samples) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)), ) return iter(dataloader) def sequence_generator_setup(): # construct dummy dictionary d = dummy_dictionary(vocab_size=2) eos = d.eos() w1 = 4 w2 = 5 # construct source data src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0) [0.0, unk, 0.9, 0.1], # w2: 0.1 # sentence 2: [0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25) [0.00, unk, 0.10, 0.9], # w2: 0.3 ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9 [ 0.6, unk, 0.2, 0.2, ], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6) # sentence 2: [ 0.60, unk, 0.4, 0.00, ], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6) [0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9 ] ), # step 3: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0) [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0) # sentence 2: [ 0.1, unk, 0.5, 0.4, ], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1) [ 1.0, unk, 0.0, 0.0, ], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0) ] ), ] task = TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) tgt_dict = task.target_dictionary return tgt_dict, w1, w2, src_tokens, src_lengths, model def create_dummy_data( data_dir, num_examples=100, maxlen=20, alignment=False, languages=None ): def _create_dummy_data(dir, filename): data = torch.rand(num_examples * maxlen) data = 97 + torch.floor(26 * data).int() with open(os.path.join(dir, filename), "w") as h: offset = 0 for _ in range(num_examples): 
ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, data[offset : offset + ex_len])) print(ex_str, file=h) offset += ex_len def _create_dummy_alignment_data(filename_src, filename_tgt, filename): with open(os.path.join(data_dir, filename_src), "r") as src_f, open( os.path.join(data_dir, filename_tgt), "r" ) as tgt_f, open(os.path.join(data_dir, filename), "w") as h: for src, tgt in zip(src_f, tgt_f): src_len = len(src.split()) tgt_len = len(tgt.split()) avg_len = (src_len + tgt_len) // 2 num_alignments = random.randint(avg_len // 2, 2 * avg_len) src_indices = torch.floor(torch.rand(num_alignments) * src_len).int() tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int() ex_str = " ".join( [ "{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices) ] ) print(ex_str, file=h) files_to_write = [ "train.in", "train.out", "valid.in", "valid.out", "test.in", "test.out", ] if languages is None: # En only dummy dataset for f in files_to_write: _create_dummy_data(data_dir, f) else: for lang in languages: lang_dir = os.path.join(data_dir, lang) os.makedirs(lang_dir, exist_ok=True) for f in files_to_write: _create_dummy_data(lang_dir, f) if alignment: _create_dummy_alignment_data("train.in", "train.out", "train.align") _create_dummy_alignment_data("valid.in", "valid.out", "valid.align") _create_dummy_alignment_data("test.in", "test.out", "test.align") def preprocess_lm_data(data_dir, languages=None): preprocess_parser = options.get_preprocessing_parser() if languages is None: preprocess_args = preprocess_parser.parse_args( [ "--only-source", "--trainpref", os.path.join(data_dir, "train.out"), "--validpref", os.path.join(data_dir, "valid.out"), "--testpref", os.path.join(data_dir, "test.out"), "--destdir", data_dir, ] ) preprocess.main(preprocess_args) else: for lang in languages: lang_dir = os.path.join(data_dir, lang) assert os.path.exists(lang_dir) preprocess_args = preprocess_parser.parse_args( [ "--only-source", "--trainpref", os.path.join(lang_dir, "train.out"), "--validpref", os.path.join(lang_dir, "valid.out"), "--testpref", os.path.join(lang_dir, "test.out"), "--destdir", lang_dir, ] ) preprocess.main(preprocess_args) shutil.copyfile( os.path.join(data_dir, languages[0], "dict.txt"), os.path.join(data_dir, "dict.txt"), ) def preprocess_translation_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def preprocess_summarization_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--joined-dictionary", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def create_laser_data_and_config_json(data_dir): src_langs = ["de", "fr", "ru", "tr", "zh"] tgt_langs = ["en", "es"] config_json = {} config_train_json = [] src_vocab = None tgt_vocab = None for src_lang in src_langs: for tgt_lang in tgt_langs: langpair_folder = 
f"{src_lang}-{tgt_lang}" langpair_path = os.path.join(data_dir, langpair_folder) os.mkdir(langpair_path) create_dummy_data(langpair_path) preprocess_translation_data(langpair_path, ["--dataset-impl", "cached"]) src_vocab = os.path.join(langpair_path, "dict.in.txt") tgt_vocab = os.path.join(langpair_path, "dict.out.txt") config_train_json.append( { "id": 0 if tgt_lang == "en" else 1, "src": os.path.join(langpair_path, "train.in-out.in"), "tgt": os.path.join(langpair_path, "train.in-out.out"), } ) config_json["src_vocab"] = src_vocab config_json["tgt_vocab"] = tgt_vocab config_json["train"] = config_train_json with open(os.path.join(data_dir, "laserconfig.json"), "w") as config_file: json.dump(config_json, config_file) return config_file def train_translation_model( data_dir, arch, extra_flags=None, task="translation", run_validation=False, lang_flags=None, extra_valid_flags=None, world_size=1, ): if lang_flags is None: lang_flags = [ "--source-lang", "in", "--target-lang", "out", ] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--save-dir", data_dir, "--arch", arch, "--optimizer", "nag", "--lr", "0.05", "--max-tokens", "500", "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", str(world_size), "--num-workers", "0", ] + lang_flags + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) distributed_utils.call_main(cfg, train.main) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + lang_flags + (extra_valid_flags or []), ) validate.main(validate_args) def generate_main(data_dir, extra_flags=None, path=None): if extra_flags is None: extra_flags = [ "--print-alignment", ] if path is None: path = os.path.join(data_dir, "checkpoint_last.pt") generate_parser = options.get_generation_parser() generate_args = options.parse_args_and_arch( generate_parser, [ data_dir, "--path", path, "--beam", "3", "--batch-size", "64", "--max-len-b", "5", "--gen-subset", "valid", "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) # evaluate model in batch mode generate.main(generate_args) # evaluate model interactively generate_args.buffer_size = 0 generate_args.input = "-" generate_args.batch_size = None orig_stdin = sys.stdin sys.stdin = StringIO("h e l l o\n") interactive.main(generate_args) sys.stdin = orig_stdin class TestDataset(torch.utils.data.Dataset): def __init__(self, data): super().__init__() self.data = data self.sizes = None def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) class TestTranslationTask(LegacyFairseqTask): def __init__(self, args, src_dict, tgt_dict, model): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict self.model = model @classmethod def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None): return cls(args, src_dict, tgt_dict, model) def build_model(self, args, from_checkpoint=False): return TestModel.build_model(args, self) @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.tgt_dict class TestModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, 
task): encoder = TestEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestIncrementalDecoder(FairseqIncrementalDecoder): def __init__(self, args, dictionary): super().__init__(dictionary) assert hasattr(args, "beam_probs") or hasattr(args, "probs") args.max_decoder_positions = getattr(args, "max_decoder_positions", 100) self.args = args def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bbsz = prev_output_tokens.size(0) vocab = len(self.dictionary) src_len = encoder_out.encoder_out.size(1) tgt_len = prev_output_tokens.size(1) # determine number of steps if incremental_state is not None: # cache step number step = utils.get_incremental_state(self, incremental_state, "step") if step is None: step = 0 utils.set_incremental_state(self, incremental_state, "step", step + 1) steps = [step] else: steps = list(range(tgt_len)) # define output in terms of raw probs if hasattr(self.args, "probs"): assert ( self.args.probs.dim() == 3 ), "expected probs to have size bsz*steps*vocab" probs = self.args.probs.index_select(1, torch.LongTensor(steps)) else: probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_() for i, step in enumerate(steps): # args.beam_probs gives the probability for every vocab element, # starting with eos, then unknown, and then the rest of the vocab if step < len(self.args.beam_probs): probs[:, i, self.dictionary.eos() :] = self.args.beam_probs[step] else: probs[:, i, self.dictionary.eos()] = 1.0 # random attention attn = torch.rand(bbsz, tgt_len, src_len) dev = prev_output_tokens.device return probs.to(dev), {"attn": [attn.to(dev)]} def get_normalized_probs(self, net_output, log_probs, _): # the decoder returns probabilities directly probs = net_output[0] if log_probs: return probs.log() else: return probs def max_positions(self): return self.args.max_decoder_positions class TestReshapingEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): b_sz, t_sz = src_tokens.shape padding_needed = t_sz % 2 x = src_tokens if padding_needed > 0: padding_needed = 2 - padding_needed x = F.pad(x, (0, padding_needed)) return EncoderOut( encoder_out=x.view(b_sz, -1, 2), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestReshapingModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestReshapingEncoder(args, 
task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestAdditionalInputEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): assert "fancy_other_input" in kwargs assert kwargs["fancy_other_input"] is not None return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestAdditionalInputModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestAdditionalInputEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out def train_language_model( data_dir, arch, extra_flags=None, run_validation=False, extra_valid_flags=None, task="language_modeling", world_size=1, ): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", str(world_size), "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) distributed_utils.call_main(cfg, train.main) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + (extra_valid_flags or []), ) validate.main(validate_args) def sizes(data): return [len(sentence) for sentence in data] POPULATION = string.ascii_letters + string.digits def make_sentence() -> tp.List[str]: length = random.randint(10, 50) return random.choices( population=POPULATION, k=length, weights=range(1, len(POPULATION) + 1) ) def make_data(length=1000, out_file=None) -> tp.List[tp.List[str]]: data = ( [make_sentence() for _ in range(0, length)] # add all the symbols at least once + [list(string.ascii_letters), list(string.digits)] ) if out_file is not None: with open(out_file, "w", encoding="utf-8") as out: for s in data: print(" ".join(s), file=out) return data def build_vocab(data: tp.List[tp.List[str]]) -> Dictionary: d = Dictionary() for s in data: for token in s: d.add_symbol(token) d.finalize() return d
EXA-1-master
exa/libraries/fairseq/tests/utils.py
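The tests/utils.py module above is the shared toolbox for the rest of the suite (dummy dictionaries, dummy dataloaders, synthetic corpora, training and generation drivers). A minimal sketch of how a few of those helpers compose, assuming fairseq is installed and the repo root is on the Python path so that tests.utils is importable; nothing below is part of the file itself:

from tests.utils import build_vocab, dummy_dictionary, make_data

# 30 random sentences over ascii letters/digits; make_data also appends one
# sentence with all letters and one with all digits so every symbol occurs.
data = make_data(length=30)
vocab = build_vocab(data)                    # fairseq Dictionary covering every token in `data`
toy_dict = dummy_dictionary(vocab_size=10)   # 10 synthetic "token_i" symbols
print(len(vocab), len(toy_dict))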
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import unittest from typing import Sequence from fairseq.data import LanguagePairDataset, ListDataset, RoundRobinZipDatasets from tests.test_train import mock_dict def lang_pair_dataset(lengths: Sequence[int]) -> LanguagePairDataset: tokens = [[i] * l for i, l in enumerate(lengths)] return LanguagePairDataset(ListDataset(tokens), lengths, mock_dict()) def sample(id: int, length: int): return {"id": id, "source": [id] * length, "target": None} class TestDataset(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_round_robin_zip_datasets(self): long_dataset = lang_pair_dataset([10, 9, 8, 11]) short_dataset = lang_pair_dataset([11, 9]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length dataset.ordered_indices() assert dataset.longest_dataset is long_dataset self.assertEqual(dict(dataset[0]), {"a": sample(2, 8), "b": sample(1, 9)}) # The item 2 of dataset 'a' is with item (2 % 2 = 0) of dataset 'b' self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 9)}) def test_round_robin_zip_datasets_filtered(self): long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) short_dataset = lang_pair_dataset([11, 20, 9, 1000]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length idx = dataset.ordered_indices() idx, _ = dataset.filter_indices_by_size(idx, {"a": 19, "b": 900}) self.assertEqual(list(idx), [0, 1, 2, 3, 4]) self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 20)}) self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(0, 11)}) def test_round_robin_zip_datasets_filtered_with_tuple(self): long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) short_dataset = lang_pair_dataset([11, 20, 9, 1000]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length idx = dataset.ordered_indices() idx, _ = dataset.filter_indices_by_size(idx, 19) self.assertEqual(list(idx), [0, 1, 2, 3, 4]) self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(2, 9)}) self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(2, 9)})
EXA-1-master
exa/libraries/fairseq/tests/test_dataset.py
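A condensed, standalone version of the RoundRobinZipDatasets pattern these tests exercise, reusing the module's own lang_pair_dataset helper. It assumes the fairseq repo root is on the path, and the size limits passed to filter_indices_by_size are arbitrary illustration values:

from fairseq.data import RoundRobinZipDatasets
from tests.test_dataset import lang_pair_dataset

long_ds = lang_pair_dataset([10, 9, 8, 11])
short_ds = lang_pair_dataset([11, 9])
zipped = RoundRobinZipDatasets({"a": long_ds, "b": short_ds})

idx = zipped.ordered_indices()            # sorts by sentence length, as the tests note
idx, ignored = zipped.filter_indices_by_size(idx, {"a": 10, "b": 20})
print(list(idx))                          # indices that survive the size filter
first = dict(zipped[0])                   # {"a": sample from long_ds, "b": sample from short_ds}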
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
import torch.nn as nn
from fairseq.modules import ConvTBC


class TestConvTBC(unittest.TestCase):
    def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(
            output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data
        )

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(
            conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data
        )
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(
            input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data
        )

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_convtbc.py
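The ConvTBC test compares fairseq's time-batch-channel convolution against torch.nn.Conv1d. Below is a small standalone sketch of the same equivalence with the unittest scaffolding stripped away; the only assumption is that the weight layouts are (kernel, in, out) for ConvTBC and (out, in, kernel) for Conv1d, exactly as the test's transpose(0, 2) copy implies:

import torch
import torch.nn as nn
from fairseq.modules import ConvTBC

conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))  # (out, in, k) -> (k, in, out)
conv_tbc.bias.data.copy_(conv1d.bias.data)

x_tbc = torch.randn(7, 2, 4)                    # (time, batch, channel)
x_1d = x_tbc.transpose(0, 1).transpose(1, 2)    # (batch, channel, time)
y_tbc = conv_tbc(x_tbc).transpose(0, 1).transpose(1, 2)
print((y_tbc - conv1d(x_1d)).abs().max())       # ~0 up to float noise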
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import OrderedDict import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.multi_corpus_dataset import MultiCorpusDataset from tests.test_train import mock_dict class TestMultiCorpusDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([i for i in range(1, 5000, 2)]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([i for i in range(0, 5000, 2)]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def _test_sample_helper( self, distribution, ): m = MultiCorpusDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), distribution=distribution, seed=0, sort_indices=True, ) m.set_epoch(1) indices = m.ordered_indices() count_sample_from_first_dataset = 0 items = set() for i in indices: item = m[i]["source"].item() if item % 2 == 1: count_sample_from_first_dataset += 1 items.add(item) sample_from_first_ds_percentage = ( 1.0 * count_sample_from_first_dataset / len(indices) ) self.assertLess( abs(sample_from_first_ds_percentage - distribution[0]), 0.01, ) self.assertEqual( len(items), int( min(len(self.dataset_1), len(indices) * distribution[0]) + min(len(self.dataset_1), len(indices) * distribution[1]) ), ) print(distribution) def test_multi_corpus_dataset(self): for distribution in [[0.5, 0.5], [0.1, 0.9], [0.9, 0.1], [0.0, 1.0]]: self._test_sample_helper(distribution=distribution)
EXA-1-master
exa/libraries/fairseq/tests/test_multi_corpus_dataset.py
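A sketch of the mixing behaviour the MultiCorpusDataset test measures: two toy corpora combined under an explicit sampling distribution. The toy_pair_dataset helper is invented here for brevity (the test builds the same thing inline), and the repo root is assumed to be on the path for mock_dict:

from collections import OrderedDict

import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.multi_corpus_dataset import MultiCorpusDataset
from tests.test_train import mock_dict


def toy_pair_dataset(values):
    tokens = torch.LongTensor(list(values)).view(1, -1)
    blocks = TokenBlockDataset(
        tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False
    )
    return LanguagePairDataset(blocks, blocks.sizes, mock_dict(), shuffle=False)


mixed = MultiCorpusDataset(
    OrderedDict({0: toy_pair_dataset(range(1, 101, 2)), 1: toy_pair_dataset(range(0, 100, 2))}),
    distribution=[0.8, 0.2],   # roughly 80% of sampled indices come from corpus 0
    seed=0,
    sort_indices=True,
)
mixed.set_epoch(1)
indices = mixed.ordered_indices()
print(len(indices), mixed[indices[0]]["source"])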
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import logging import unittest import torch from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer from omegaconf import OmegaConf @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestGradientScaling(unittest.TestCase): def setUp(self): self.x = torch.tensor([2.0]).cuda().half() weight = 3.0 bias = 5.0 self.error = 1.0 self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half() self.loss_fn = torch.nn.L1Loss() self.model = torch.nn.Linear(1, 1) self.model.weight.data = torch.tensor([[weight]]) self.model.bias.data = torch.tensor([bias]) self.model.cuda().half() self.params = list(self.model.parameters()) self.cfg_dls = OmegaConf.create( { "optimization": { "lr": [0.1], }, "optimizer": { "_name": "adam", "lr": [0.1], "adam_betas": "(0.9, 0.999)", "adam_eps": 1e-8, "weight_decay": 0.0, }, "common": { "fp16_init_scale": 1, "fp16_scale_window": 1, "fp16_scale_tolerance": 1, "threshold_loss_scale": 1, "min_loss_scale": 1e-4, "tpu": False, }, } ) logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def run_iter(self, model, params, optimizer): optimizer.zero_grad() y = model(self.x) loss = self.loss_fn(y, self.target) optimizer.backward(loss) self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16)) grad_norm = optimizer.clip_grad_norm(0) self.assertAlmostEqual(grad_norm.item(), 2.2361, 4) optimizer.step() self.assertEqual( model.weight, torch.tensor( [[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True ), ) self.assertEqual( model.bias, torch.tensor( [5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True ), ) self.assertEqual(optimizer.scaler.loss_scale, 2.0) def test_mixed_precision(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params) self.run_iter(model, params, optimizer) self.assertTrue( all( torch.all( fp32_params.eq( torch.tensor( [3.1000, 5.1000], device="cuda:0", requires_grad=True ) ) ) for fp32_params in optimizer.fp32_params.values() ) ) def test_memory_efficient(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params) self.run_iter(model, params, optimizer) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_fp16_optimizer.py
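The gradient-scaling test above drives one hand-rolled training step. Below is a condensed sketch of the same wiring with the identical config keys and call order; like the test it assumes a CUDA device, and the toy model and loss are illustration only:

import torch
from fairseq.optim.fp16_optimizer import FP16Optimizer
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "optimization": {"lr": [0.1]},
    "optimizer": {"_name": "adam", "lr": [0.1], "adam_betas": "(0.9, 0.999)",
                  "adam_eps": 1e-8, "weight_decay": 0.0},
    "common": {"fp16_init_scale": 1, "fp16_scale_window": 1, "fp16_scale_tolerance": 1,
               "threshold_loss_scale": 1, "min_loss_scale": 1e-4, "tpu": False},
})

model = torch.nn.Linear(1, 1).cuda().half()
optimizer = FP16Optimizer.build_optimizer(cfg, list(model.parameters()))

optimizer.zero_grad()
loss = model(torch.tensor([[2.0]]).cuda().half()).sum()
optimizer.backward(loss)                 # scales the loss before backprop
grad_norm = optimizer.clip_grad_norm(0)  # unscales grads and returns their norm (0 as in the test)
optimizer.step()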
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import unittest import torch from fairseq.optim.adam import FairseqAdam from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer from omegaconf import OmegaConf @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestMemoryEfficientFP16(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_load_state_dict(self): # define simple FP16 model model = torch.nn.Linear(5, 5).cuda().half() params = list(model.parameters()) # initialize memory efficient FP16 optimizer # with pseudo DictConfigs optimizer = FairseqAdam( cfg=OmegaConf.create( vars( argparse.Namespace( adam_betas="(0.9, 0.999)", adam_eps=1e-8, weight_decay=0.0, lr=[0.00001], ) ) ), params=params, ) me_optimizer = MemoryEfficientFP16Optimizer( cfg=OmegaConf.create( { "common": vars( argparse.Namespace( fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4, ) ) } ), params=params, optimizer=optimizer, ) # optimizer state is created in the first step loss = model(torch.rand(5).cuda().half()).sum() me_optimizer.backward(loss) me_optimizer.step() # reload state state = me_optimizer.state_dict() me_optimizer.load_state_dict(state) for k, v in me_optimizer.optimizer.state.items(): self.assertTrue(k.dtype == torch.float16) for v_i in v.values(): if torch.is_tensor(v_i): self.assertTrue(v_i.dtype == torch.float32) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_memory_efficient_fp16.py
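A condensed sketch of the construction and state-dict round trip this test checks: a plain FairseqAdam wrapped by MemoryEfficientFP16Optimizer. All arguments mirror the test's own, and a GPU is assumed:

import argparse

import torch
from fairseq.optim.adam import FairseqAdam
from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf

model = torch.nn.Linear(5, 5).cuda().half()
params = list(model.parameters())

inner = FairseqAdam(
    cfg=OmegaConf.create(vars(argparse.Namespace(
        adam_betas="(0.9, 0.999)", adam_eps=1e-8, weight_decay=0.0, lr=[0.00001]))),
    params=params,
)
wrapped = MemoryEfficientFP16Optimizer(
    cfg=OmegaConf.create({"common": vars(argparse.Namespace(
        fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1,
        threshold_loss_scale=1, min_loss_scale=1e-4))}),
    params=params,
    optimizer=inner,
)

loss = model(torch.rand(5).cuda().half()).sum()
wrapped.backward(loss)
wrapped.step()                                   # optimizer state is created on the first step
wrapped.load_state_dict(wrapped.state_dict())    # the round trip the assertions inspect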
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import tests.utils as test_utils import torch from fairseq.data import TokenBlockDataset class TestTokenBlockDataset(unittest.TestCase): def _build_dataset(self, data, **kwargs): sizes = [len(x) for x in data] underlying_ds = test_utils.TestDataset(data) return TokenBlockDataset(underlying_ds, sizes, **kwargs) def test_eos_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [1]) self.assertEqual(ds[2].tolist(), [8, 7, 6, 1]) data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1]) self.assertEqual(ds[2].tolist(), [1]) def test_block_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none") self.assertEqual(ds[0].tolist(), [5, 4, 3]) self.assertEqual(ds[1].tolist(), [2, 1, 8]) self.assertEqual(ds[2].tolist(), [7, 6, 1]) self.assertEqual(ds[3].tolist(), [9, 1]) def test_complete_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset( data, block_size=6, pad=0, eos=1, break_mode="complete" ) self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1]) data = [ torch.tensor([4, 3, 2, 1], dtype=torch.long), torch.tensor([5, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([6, 1], dtype=torch.long), ] ds = self._build_dataset( data, block_size=3, pad=0, eos=1, break_mode="complete" ) self.assertEqual(ds[0].tolist(), [4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [5, 1, 1]) self.assertEqual(ds[2].tolist(), [6, 1]) def test_4billion_tokens(self): """Regression test for numpy type promotion issue https://github.com/numpy/numpy/issues/5745""" data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000 ds = self._build_dataset( data, block_size=6, pad=0, eos=1, break_mode="complete" ) ds[-1] # __getitem__ works start, end = ds.slice_indices[-1] assert end > 4294967295 # data must be sufficiently large to overflow uint32 assert not isinstance( end + 1, float ) # this would also raise, since np.uint64(1) + 1 => 2.0 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_token_block_dataset.py
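A standalone sketch contrasting the three TokenBlockDataset break modes these tests assert on, built with the same TestDataset wrapper from tests/utils.py (repo root assumed on the path):

import torch
from fairseq.data import TokenBlockDataset
from tests import utils as test_utils

data = [
    torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
    torch.tensor([8, 7, 6, 1], dtype=torch.long),
    torch.tensor([9, 1], dtype=torch.long),
]
sizes = [len(x) for x in data]
base = test_utils.TestDataset(data)

by_eos = TokenBlockDataset(base, sizes, block_size=None, pad=0, eos=1, break_mode="eos")
fixed = TokenBlockDataset(base, sizes, block_size=3, pad=0, eos=1, break_mode="none")
packed = TokenBlockDataset(base, sizes, block_size=6, pad=0, eos=1, break_mode="complete")

print(by_eos[0].tolist())   # [5, 4, 3, 2, 1]      one block per sentence
print(fixed[1].tolist())    # [2, 1, 8]            fixed blocks ignore sentence boundaries
print(packed[1].tolist())   # [8, 7, 6, 1, 9, 1]   whole sentences packed up to the budget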
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import math import tempfile import unittest import numpy as np import torch import tests.utils as test_utils from fairseq import search from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.ngram_repeat_block import NGramRepeatBlock from fairseq.sequence_generator import EnsembleModel, SequenceGenerator from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), n=1000) return dummy_dict def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser class TestJitSequenceGeneratorBase(unittest.TestCase): def setUp(self): self.task, self.parser = get_dummy_task_and_parser() eos = self.task.tgt_dict.eos() src_tokens = torch.randint(3, 50, (2, 10)).long() src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1) src_lengths = torch.LongTensor([2, 10]) self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } TransformerModel.add_args(self.parser) args = self.parser.parse_args([]) args.encoder_layers = 2 args.decoder_layers = 1 self.transformer_model = TransformerModel.build_model(args, self.task) def assertOutputEqual(self, hypo, pos_probs): pos_scores = torch.FloatTensor(pos_probs).log() self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores) self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel()) def assertTensorSizeEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) def assertHypoEqual(self, h1, h2): "Check two hypos are equal" self.assertTensorEqual(h1["tokens"], h2["tokens"]) self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"]) self.assertLess(abs(h1["score"] - h2["score"]), 1e-6) self.assertAlmostEqual(h1["attention"], h2["attention"]) def _test_save_and_load(self, scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) JIT_MSG = "Targeting OSS scriptability for the 1.6 release" @unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG) 
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase): def test_export_transformer(self): model = self.transformer_model torch.jit.script(model) def test_ensemble_sequence_generator(self): model = self.transformer_model generator = SequenceGenerator( [model], self.task.tgt_dict, beam_size=2, no_repeat_ngram_size=2, max_len_b=10, ) scripted_model = torch.jit.script(generator) self._test_save_and_load(scripted_model) def test_export_ensemble_model(self): model = self.transformer_model ensemble_models = EnsembleModel([model]) torch.jit.script(ensemble_models) class TestExportSearch(unittest.TestCase): def setUp(self): task, _ = get_dummy_task_and_parser() self.tgt_dict = task.tgt_dict self.min_top1_prob = 0.4 def test_export_diverse_bs(self): search_strategy = search.DiverseBeamSearch( self.tgt_dict, num_groups=2, diversity_strength=0.0 ) torch.jit.script(search_strategy) def test_export_sampling(self): low_sampling_topp = self.min_top1_prob / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=low_sampling_topp ) torch.jit.script(search_strategy) def test_export_diverse_siblings_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) torch.jit.script(search_strategy) class TestSequenceGeneratorBase(unittest.TestCase): def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) class TestSequenceGenerator(TestSequenceGeneratorBase): def setUp(self): ( self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model, ) = test_utils.sequence_generator_setup() self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } def test_with_normalization(self): generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6]) def test_without_normalization(self): # Sentence 1: unchanged from the normalized case # Sentence 2: beams swap order generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, normalize_scores=False ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) 
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False) def test_with_lenpen_favoring_short_hypos(self): lenpen = 0.6 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) def test_with_lenpen_favoring_long_hypos(self): lenpen = 5.0 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen) def test_maxlen(self): generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, max_len_b=2 ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w2, w2, eos]) self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01]) def test_encoder_with_different_output_len(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task( args, self.tgt_dict, self.tgt_dict ) reshaping_model = test_utils.TestReshapingModel.build_model(args, task) generator = SequenceGenerator( [reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2 ) hypos = generator.forward(self.sample) for sent in [0, 1]: for beam in [0, 1]: assert hypos[sent][beam]["attention"] is not None def test_generation_with_additional_input(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task( args, self.tgt_dict, self.tgt_dict ) add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task) generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2) sample = self.sample.copy() sample["net_input"]["fancy_other_input"] = sample["net_input"]["src_tokens"] hypos = 
generator.forward(self.sample) eos, w1 = self.tgt_dict.eos(), self.w1 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) @unittest.skipUnless(torch.cuda.is_available(), "") class TestRepeatNgramBlocking(TestSequenceGeneratorBase): @classmethod def setUpClass(cls): ( cls.tgt_dict, cls.w1, cls.w2, src_tokens, src_lengths, cls.model, ) = test_utils.sequence_generator_setup() return cls def test_finds_repetitive_tokens(self): bsz, vocab_size, beam_size, step = 2, 4, 1, 3 generated_tok = torch.tensor( [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda" ) lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda") desired_result = lprobs.new_tensor( [[0.0, 0.0, -math.inf, 0.0], [0.0, 0.0, 0.0, -math.inf]] ) cuda_ext_result, baseline_result = self._compare_cuda_ext_to_default_implem( bsz, beam_size, generated_tok, lprobs, step, 2 ) self.assertTensorEqual(cuda_ext_result, desired_result) self.assertTensorEqual(baseline_result, desired_result) @unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG) def test_jit_no_extension(self): bsz, vocab_size, beam_size, step = 2, 4, 1, 3 generated_tok = torch.tensor( [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda" ) lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda") blocker = NGramRepeatBlock(2, use_extension=False) base_result = blocker(generated_tok, lprobs.clone(), bsz, beam_size, step) scripted_blocker = torch.jit.script(blocker) jit_result = scripted_blocker( generated_tok, lprobs.clone(), bsz, beam_size, step ) self.assertTensorEqual(base_result, jit_result) def test_ngram_blocking_same_as_default_implem(self): """Test that cuda extension returns same things as default impl in many settings.""" vocab_size = 4 step = 6 for _ in range(2): block_param = np.random.choice([1, 2, 3, 4]) batch_size = np.random.randint(1, 8) beam_size = np.random.choice([1, 2, 4, 8]) lprobs = torch.zeros((beam_size * batch_size, vocab_size), device="cuda") generated_tok = torch.tensor( np.random.randint( 0, vocab_size, size=(batch_size * beam_size, step + 1) ), device="cuda", dtype=torch.long, ) self._compare_cuda_ext_to_default_implem( batch_size, beam_size, generated_tok, lprobs, step, block_param, ) def _compare_cuda_ext_to_default_implem( self, bsz, beam_size, generated_tok, lprobs, step, block_param ): """Assert that cuda extension and default implem return the same thing.""" blocker = NGramRepeatBlock(block_param) assert blocker.use_extension, "Extension not compiled" cuda_ext_result = blocker( generated_tok, lprobs.clone(), bsz, beam_size, step, ) blocker.use_extension = False baseline_result = blocker( generated_tok, lprobs.clone(), bsz, beam_size, step, ) self.assertTensorEqual(cuda_ext_result, baseline_result) blocker.use_extension = True return cuda_ext_result, baseline_result class TestDiverseBeamSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor( [ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ] ) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ] ), # step 1: 
torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.6, 0.4], [0.0, unk, 0.6, 0.4], # sentence 2: [0.25, unk, 0.35, 0.4], [0.25, unk, 0.35, 0.4], ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], # sentence 2: [0.9, unk, 0.1, 0.0], [0.9, unk, 0.1, 0.0], ] ), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_diverse_beam_search(self): search_strategy = search.DiverseBeamSearch( self.tgt_dict, num_groups=2, diversity_strength=0.0 ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy, ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9]) class TestDiverseSiblingsSearch(TestDiverseBeamSearch): def assertHypoScore( self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0 ): pos_scores = torch.FloatTensor(pos_probs).log() pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate) self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def test_diverse_beam_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5) class TestTopPSamplingSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor( [ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ] ) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 # The minimal probability of top 2 tokens. self.min_top2_prob = 0.75 # The minimal probability of the top 1 token. 
self.min_top1_prob = 0.4 w1_prob = self.min_top1_prob w2_prob = self.min_top2_prob - self.min_top1_prob eos_prob = 1 - self.min_top2_prob args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], ] ), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_topp_sampling_search_low_prob(self): # Given a prob low enough to top-P sampling, we expect only the top # 1 token to be sampled, which always results in the same output. low_sampling_topp = self.min_top1_prob / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=low_sampling_topp ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1 = self.eos, self.w1 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w1, eos]) self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0]) def test_topp_sampling_search_high_prob(self): # Given a prob high enough to top-P sampling, any of the top 2 # tokens could be sampled. This can cause different outputs. 
high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=high_sampling_topp ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertTrue( self.hypoTokens(hypos[0][0], [w1, w1, eos]) or self.hypoTokens(hypos[0][0], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0]) ) # sentence 1, beam 2 self.assertTrue( self.hypoTokens(hypos[0][1], [w1, w1, eos]) or self.hypoTokens(hypos[0][1], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0]) ) # sentence 2, beam 1 self.assertTrue( self.hypoTokens(hypos[1][0], [w1, w1, eos]) or self.hypoTokens(hypos[1][0], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0]) ) # sentence 2, beam 2 self.assertTrue( self.hypoTokens(hypos[1][1], [w1, w1, eos]) or self.hypoTokens(hypos[1][1], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0]) ) def hypoTokens(self, hypo, tokens): return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() if not self.almostEqual(hypo["positional_scores"], pos_scores): return False if pos_scores.numel() != hypo["tokens"].numel(): return False score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen return abs(score - hypo["score"]) < 1e-6 def almostEqual(self, t1, t2): return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4 def tensorEqual(self, t1, t2): return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_sequence_generator.py
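Most of the tests above share one setup: a toy encoder-decoder whose per-step output distributions are scripted through args.beam_probs, wrapped in a SequenceGenerator. A minimal sketch of that flow, assuming the repo root is on the path so tests.utils is importable:

from fairseq.sequence_generator import SequenceGenerator
from tests import utils as test_utils

tgt_dict, w1, w2, src_tokens, src_lengths, model = test_utils.sequence_generator_setup()
generator = SequenceGenerator([model], tgt_dict, beam_size=2)

sample = {"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}}
hypos = generator.forward(sample)

best = hypos[0][0]   # hypos[sentence][beam]: dict with "tokens", "score", "positional_scores", "attention"
print(best["tokens"].tolist(), float(best["score"]))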
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import shutil import tempfile import unittest from typing import Optional class TestFileChunker(unittest.TestCase): _tmpdir: Optional[str] = None _tmpfile: Optional[str] = None _line_content = "Hello, World\n" _num_bytes = None _num_lines = 200 _num_splits = 20 @classmethod def setUpClass(cls) -> None: cls._num_bytes = len(cls._line_content.encode("utf-8")) cls._tmpdir = tempfile.mkdtemp() with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f: cls._tmpfile = f.name for _i in range(cls._num_lines): f.write(cls._line_content) f.flush() @classmethod def tearDownClass(cls) -> None: # Cleanup temp working dir. if cls._tmpdir is not None: shutil.rmtree(cls._tmpdir) # type: ignore def test_find_offsets(self): from fairseq.file_chunker_utils import find_offsets offsets = find_offsets(self._tmpfile, self._num_splits) self.assertEqual(len(offsets), self._num_splits + 1) (zero, *real_offsets, last) = offsets self.assertEqual(zero, 0) for i, o in enumerate(real_offsets): self.assertEqual( o, self._num_bytes + ((i + 1) * self._num_bytes * self._num_lines / self._num_splits), ) self.assertEqual(last, self._num_bytes * self._num_lines) def test_readchunks(self): from fairseq.file_chunker_utils import Chunker, find_offsets offsets = find_offsets(self._tmpfile, self._num_splits) for start, end in zip(offsets, offsets[1:]): with Chunker(self._tmpfile, start, end) as lines: all_lines = list(lines) num_lines = self._num_lines / self._num_splits self.assertAlmostEqual( len(all_lines), num_lines, delta=1 ) # because we split on the bites, we might end up with one more/less line in a chunk self.assertListEqual( all_lines, [self._line_content for _ in range(len(all_lines))] )
EXA-1-master
exa/libraries/fairseq/tests/test_file_chunker_utils.py
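A standalone sketch of the chunking API this test covers: find_offsets splits a text file into byte ranges, and Chunker reads one range back as lines. The temporary file below is illustration only:

import tempfile

from fairseq.file_chunker_utils import Chunker, find_offsets

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    for _ in range(200):
        f.write("Hello, World\n")
    path = f.name

offsets = find_offsets(path, 4)              # num_splits + 1 boundaries, so 5 values here
for start, end in zip(offsets, offsets[1:]):
    with Chunker(path, start, end) as lines:
        print(start, end, len(list(lines)))  # ~200 / 4 lines per chunk, give or take one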
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from argparse import ArgumentParser from dataclasses import dataclass, field from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import gen_parser_from_dataclass @dataclass class A(FairseqDataclass): data: str = field(default="test", metadata={"help": "the data input"}) num_layers: int = field(default=200, metadata={"help": "more layers is better?"}) @dataclass class B(FairseqDataclass): bar: A = field(default=A()) foo: int = field(default=0, metadata={"help": "not a bar"}) @dataclass class D(FairseqDataclass): arch: A = field(default=A()) foo: int = field(default=0, metadata={"help": "not a bar"}) @dataclass class C(FairseqDataclass): data: str = field(default="test", metadata={"help": "root level data input"}) encoder: D = field(default=D()) decoder: A = field(default=A()) lr: int = field(default=0, metadata={"help": "learning rate"}) class TestDataclassUtils(unittest.TestCase): def test_argparse_convert_basic(self): parser = ArgumentParser() gen_parser_from_dataclass(parser, A(), True) args = parser.parse_args(["--num-layers", "10", "the/data/path"]) self.assertEqual(args.num_layers, 10) self.assertEqual(args.data, "the/data/path") def test_argparse_recursive(self): parser = ArgumentParser() gen_parser_from_dataclass(parser, B(), True) args = parser.parse_args(["--num-layers", "10", "--foo", "10", "the/data/path"]) self.assertEqual(args.num_layers, 10) self.assertEqual(args.foo, 10) self.assertEqual(args.data, "the/data/path") def test_argparse_recursive_prefixing(self): self.maxDiff = None parser = ArgumentParser() gen_parser_from_dataclass(parser, C(), True, "") args = parser.parse_args( [ "--encoder-arch-data", "ENCODER_ARCH_DATA", "--encoder-arch-num-layers", "10", "--encoder-foo", "10", "--decoder-data", "DECODER_DATA", "--decoder-num-layers", "10", "--lr", "10", "the/data/path", ] ) self.assertEqual(args.encoder_arch_data, "ENCODER_ARCH_DATA") self.assertEqual(args.encoder_arch_num_layers, 10) self.assertEqual(args.encoder_foo, 10) self.assertEqual(args.decoder_data, "DECODER_DATA") self.assertEqual(args.decoder_num_layers, 10) self.assertEqual(args.lr, 10) self.assertEqual(args.data, "the/data/path") if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_dataclass_utils.py
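A sketch of the parser generation the test above checks: a FairseqDataclass is turned into argparse options named after its fields, with help strings taken from the field metadata; as the assertions show, the data field is consumed as a positional argument. ToyConfig is invented here for illustration:

from argparse import ArgumentParser
from dataclasses import dataclass, field

from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass


@dataclass
class ToyConfig(FairseqDataclass):
    data: str = field(default="test", metadata={"help": "the data input"})
    num_layers: int = field(default=200, metadata={"help": "more layers is better?"})


parser = ArgumentParser()
gen_parser_from_dataclass(parser, ToyConfig(), True)   # same arguments as the test's calls
args = parser.parse_args(["--num-layers", "10", "the/data/path"])
print(args.num_layers, args.data)                      # 10 and "the/data/path", per the test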
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.concat_dataset import ConcatDataset from tests.test_train import mock_dict class TestConcatDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([1]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([2]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def test_concat_dataset_basics(self): d = ConcatDataset([self.dataset_1, self.dataset_2]) assert len(d) == 2 assert d[0]["source"][0] == 1 assert d[1]["source"][0] == 2 d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2]) assert len(d) == 3 assert d[0]["source"][0] == 1 assert d[1]["source"][0] == 2 assert d[2]["source"][0] == 2 d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1]) assert len(d) == 3 assert d[0]["source"][0] == 1 assert d[1]["source"][0] == 1 assert d[2]["source"][0] == 2
EXA-1-master
exa/libraries/fairseq/tests/test_concat_dataset.py
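A condensed sketch of the upsampling behaviour asserted in test_concat_dataset_basics: sample_ratios repeats a constituent dataset inside the concatenation. The one_token_dataset helper is invented here (the test builds the same thing inline in setUp), and the repo root is assumed on the path for mock_dict:

import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict


def one_token_dataset(value):
    tokens = torch.LongTensor([value]).view(1, -1)
    blocks = TokenBlockDataset(
        tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False
    )
    return LanguagePairDataset(blocks, blocks.sizes, mock_dict(), shuffle=False)


d1, d2 = one_token_dataset(1), one_token_dataset(2)
plain = ConcatDataset([d1, d2])                            # length 2: one item from each
upsampled = ConcatDataset([d1, d2], sample_ratios=[1, 2])  # length 3: the second dataset appears twice
print(len(plain), len(upsampled))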
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import tempfile import unittest from pathlib import Path from typing import Any, Dict, Sequence import fairseq.data.indexed_dataset as indexed_dataset import fairseq.options import fairseq.tasks.online_backtranslation as obt import torch from tests import utils def mk_sample(tokens: Sequence[int], batch_size: int = 2) -> Dict[str, Any]: batch = torch.stack([torch.tensor(tokens, dtype=torch.long)] * batch_size) sample = { "net_input": { "src_tokens": batch, "prev_output_tokens": batch, "src_lengths": torch.tensor([len(tokens)] * batch_size, dtype=torch.long), }, "target": batch[:, 1:], } return sample def mk_dataset(num_samples: int, max_len: int, output: Path): output.parent.mkdir(exist_ok=True) idx = indexed_dataset.IndexedDatasetBuilder(str(output)) data = torch.randint(5, 100, (num_samples, max_len)) lengths = torch.randint(3, max_len, (num_samples,)) for d, l in zip(data, lengths): d[0] = 0 idx.add_item(d[:l]) idx.finalize(output.with_suffix(".idx")) assert output.exists() assert output.with_suffix(".idx").exists() class OnlineBacktranslationTest(unittest.TestCase): tmp_dir = Path(tempfile.mkdtemp(suffix="OnlineBacktranslationTest")) @classmethod def obt_task( cls, languages: Sequence[str], data: Path = None, language_mapping: str = None ): dict_path = cls.tmp_dir / "dict.txt" if not dict_path.exists(): dictionary = utils.dummy_dictionary(100) dictionary.save(str(dict_path)) if data is not None: (data / "dict.txt").write_text(dict_path.read_text()) else: data = cls.tmp_dir assert len(languages) >= 2 kwargs = { "arch": "transformer", # --max-sentences=1 for better predictability of batches "max_sentences": 1, # Use characteristics dimensions "encoder_layers": 3, "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "encoder_attention_heads": 4, "decoder_layers": 3, "decoder_embed_dim": 12, "decoder_output_dim": 12, "decoder_ffn_embed_dim": 14, "decoder_attention_heads": 4, # Disable dropout so we have comparable tests. 
"dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, } args = fairseq.options.get_args( data, task="online_backtranslation", mono_langs=",".join(languages), valid_lang_pairs=f"{languages[0]}-{languages[1]}", tokens_per_sample=256, language_mapping=language_mapping, **kwargs, ) task = obt.OnlineBackTranslationTask.setup_task(args) # we need to build the model to have the correct dictionary model = task.build_model(task.args) return task, model def tmp_path(self, test_case: str) -> Path: return Path(tempfile.mkdtemp(test_case, dir=self.tmp_dir)) def test_lang_tokens(self): task, model = self.obt_task(["en", "ro", "zh"]) assert obt._lang_token("en") in task.dictionary assert obt._lang_token("ro") in task.dictionary assert obt._lang_token("zh") in task.dictionary en_bos = obt._lang_token_index(task.common_dict, "en") assert "en" == task.common_dict[en_bos].strip("_") zh_bos = obt._lang_token_index(task.common_dict, "zh") assert "zh" == task.common_dict[zh_bos].strip("_") zh_sample = mk_sample([zh_bos, 16, 14, 12, 10]) # we expect to receive the bos token for translation assert task.get_bos_token_from_sample(zh_sample) == en_bos def test_backtranslate_sample(self): task, model = self.obt_task(["en", "ro", "zh"]) en_bos = obt._lang_token_index(task.common_dict, "en") zh_bos = obt._lang_token_index(task.common_dict, "zh") sample = mk_sample([zh_bos, 16, 14, 12, 10]) task.backtranslate_sample(sample, "zh", "en") target_zh = list(sample["target"][0]) assert target_zh == [16, 14, 12, 10] # original zh sentence generated_en = sample["net_input"]["src_tokens"][0] assert generated_en[0] == en_bos def test_train_dataset(self): data = self.tmp_path("test_train_dataset") mk_dataset(20, 10, data / "en" / "train.bin") mk_dataset(10, 10, data / "zh" / "train.bin") task, model = self.obt_task(["en", "zh"], data) task.load_dataset("train") en_bos = obt._lang_token_index(task.common_dict, "en") zh_bos = obt._lang_token_index(task.common_dict, "zh") train = task.datasets["train"] train.ordered_indices() train.prefetch([0, 19]) sample_0 = train[0] sample_19 = train[19] self.assertEqual( set(sample_0.keys()), {"en-BT", "en-DENOISE", "zh-BT", "zh-DENOISE"} ) for sample in (sample_0, sample_19): self.assertEqual(sample["en-BT"]["source"][0], en_bos) # bt target isn't ready to look at. self.assertEqual(sample["en-DENOISE"]["source"][0], en_bos) # TODO What could we check on the target side ? for i in range(10): # Zh dataset is shorter, and is wrapped around En dataset. train.prefetch([i, i + 10]) self.assertEqual( list(train[i]["zh-DENOISE"]["source"]), list(train[i + 10]["zh-DENOISE"]["source"]), ) self.assertEqual(train[i]["zh-DENOISE"]["source"][0].item(), zh_bos) # Sorted by increasing len self.assertLess( len(sample_0["en-BT"]["source"]), len(sample_19["en-BT"]["source"]) ) def test_valid_dataset(self): data = self.tmp_path("test_valid_dataset") mk_dataset(10, 21, data / "valid.en-zh.en.bin") mk_dataset(10, 21, data / "valid.en-zh.zh.bin") task, model = self.obt_task(["en", "zh"], data) valid = task.load_dataset("valid") en_bos = obt._lang_token_index(task.common_dict, "en") assert valid is not None valid.prefetch(range(10)) sample_0 = valid[0] sample_9 = valid[9] self.assertEqual(sample_0["id"], 0) self.assertEqual(sample_9["id"], 9) self.assertEqual(sample_0["source"][0], en_bos) self.assertEqual(sample_9["source"][0], en_bos) # TODO: could we test the target side ? 
def assertFnMatch(self, fn, values): for x, y in values.items(): fn_x = fn(x) self.assertEqual(fn_x, y, f"Fn has wrong value: fn({x}) = {fn_x} != {y}") def test_piecewise_linear_fn(self): self.assertFnMatch( obt.PiecewiseLinearFn.from_string("1.0"), {0: 1, 100: 1, 500: 1, 1000: 1} ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:1,1000:0"), {0: 1, 500: 0.5, 1000: 0, 2000: 0}, ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:0,1000:1"), {0: 0, 500: 0.5, 1000: 1, 2000: 1}, ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:0,1000:1,2000:0"), {0: 0, 500: 0.5, 1000: 1, 1500: 0.5, 2000: 0, 3000: 0}, )
EXA-1-master
exa/libraries/fairseq/tests/test_online_backtranslation.py
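The test_piecewise_linear_fn assertions above pin down a schedule parsed from strings such as "0:1,1000:0", linearly interpolated between points and held flat outside them. Below is a minimal standalone sketch of that behaviour, not fairseq's PiecewiseLinearFn; parse_schedule and schedule_value are illustrative names.

from typing import List, Tuple


def parse_schedule(spec: str) -> List[Tuple[int, float]]:
    # "1.0" means a constant schedule; otherwise "step:value,step:value,..."
    if ":" not in spec:
        return [(0, float(spec))]
    pieces = [p.split(":") for p in spec.split(",")]
    return sorted((int(s), float(v)) for s, v in pieces)


def schedule_value(points: List[Tuple[int, float]], step: int) -> float:
    # Flat before the first point, linear between points, flat after the last.
    if step <= points[0][0]:
        return points[0][1]
    for (x0, y0), (x1, y1) in zip(points, points[1:]):
        if step <= x1:
            return y0 + (y1 - y0) * (step - x0) / (x1 - x0)
    return points[-1][1]


if __name__ == "__main__":
    pts = parse_schedule("0:1,1000:0")
    assert schedule_value(pts, 500) == 0.5 and schedule_value(pts, 2000) == 0.0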
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

try:
    import huggingface_hub
except ImportError:
    huggingface_hub = None

from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub


@unittest.skipIf(not huggingface_hub, "Requires huggingface_hub install")
class TestHuggingFaceHub(unittest.TestCase):
    @torch.no_grad()
    def test_hf_fastspeech2(self):
        hf_model_id = "facebook/fastspeech2-en-ljspeech"
        models, cfg, task = load_model_ensemble_and_task_from_hf_hub(hf_model_id)
        self.assertTrue(len(models) > 0)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_hf_hub.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

from fairseq.data import MonolingualDataset
from fairseq.tasks.language_modeling import LanguageModelingConfig, LanguageModelingTask
from tests import utils as test_utils


class TestLMContextWindow(unittest.TestCase):
    def test_eval_dataloader(self):
        dictionary = test_utils.dummy_dictionary(10)
        assert len(dictionary) == 14  # 4 extra special symbols
        assert dictionary.pad() == 1

        dataset = test_utils.TestDataset(
            [
                torch.tensor([4, 5, 6, 7], dtype=torch.long),
                torch.tensor([8, 9, 10, 11], dtype=torch.long),
                torch.tensor([12, 13], dtype=torch.long),
            ]
        )
        dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)

        config = LanguageModelingConfig(tokens_per_sample=4)
        task = LanguageModelingTask(config, dictionary)

        eval_dataloader = task.eval_lm_dataloader(
            dataset=dataset,
            batch_size=1,
            context_window=2,
            num_workers=0,
        )

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1]
        assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1]

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11]
        assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11]

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13]
        assert batch["target"][0].tolist() == [1, 1, 12, 13]


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_lm_context_window.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import shutil
import sys
import tempfile
import unittest
from typing import Optional
from unittest.mock import MagicMock


class TestFileIO(unittest.TestCase):

    _tmpdir: Optional[str] = None
    _tmpfile: Optional[str] = None
    _tmpfile_contents = "Hello, World"

    @classmethod
    def setUpClass(cls) -> None:
        cls._tmpdir = tempfile.mkdtemp()
        with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
            cls._tmpfile = f.name
            f.write(cls._tmpfile_contents)
            f.flush()

    @classmethod
    def tearDownClass(cls) -> None:
        # Cleanup temp working dir.
        if cls._tmpdir is not None:
            shutil.rmtree(cls._tmpdir)  # type: ignore

    def test_file_io(self):
        from fairseq.file_io import PathManager

        with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
            s = f.read()
        self.assertEqual(s, self._tmpfile_contents)

    def test_file_io_oss(self):
        # Mock iopath to simulate oss environment.
        sys.modules["iopath"] = MagicMock()
        from fairseq.file_io import PathManager

        with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
            s = f.read()
        self.assertEqual(s, self._tmpfile_contents)

    def test_file_io_async(self):
        # ioPath `PathManager` is initialized after the first `opena` call.
        try:
            from fairseq.file_io import PathManager

            _asyncfile = os.path.join(self._tmpdir, "async.txt")
            f = PathManager.opena(_asyncfile, "wb")
            f.close()
        finally:
            self.assertTrue(PathManager.async_close())
EXA-1-master
exa/libraries/fairseq/tests/test_file_io.py
import argparse import unittest from typing import Any, Dict, Sequence import torch from fairseq.models import transformer from tests.test_roberta import FakeTask def mk_sample(tok: Sequence[int] = None, batch_size: int = 2) -> Dict[str, Any]: if not tok: tok = [10, 11, 12, 13, 14, 15, 2] batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size) sample = { "net_input": { "src_tokens": batch, "prev_output_tokens": batch, "src_lengths": torch.tensor( [len(tok)] * batch_size, dtype=torch.long, device=batch.device ), }, "target": batch[:, 1:], } return sample def mk_transformer(**extra_args: Any): overrides = { # Use characteristics dimensions "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "decoder_embed_dim": 12, "decoder_ffn_embed_dim": 14, # Disable dropout so we have comparable tests. "dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, } overrides.update(extra_args) # Overrides the defaults from the parser args = argparse.Namespace(**overrides) transformer.tiny_architecture(args) torch.manual_seed(0) task = FakeTask(args) return transformer.TransformerModel.build_model(args, task) class TransformerTestCase(unittest.TestCase): def test_forward_backward(self): model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=12) sample = mk_sample() o, _ = model.forward(**sample["net_input"]) loss = o.sum() loss.backward() def test_different_encoder_decoder_embed_dim(self): model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=16) sample = mk_sample() o, _ = model.forward(**sample["net_input"]) loss = o.sum() loss.backward()
EXA-1-master
exa/libraries/fairseq/tests/test_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import tempfile import unittest import torch from . import test_binaries class TestReproducibility(unittest.TestCase): def _test_reproducibility( self, name, extra_flags=None, delta=0.0001, resume_checkpoint="checkpoint1.pt", max_epoch=3, ): def get_last_log_stats_containing_string(log_records, search_string): for log_record in logs.records[::-1]: if isinstance(log_record.msg, str) and search_string in log_record.msg: return json.loads(log_record.msg) if extra_flags is None: extra_flags = [] with tempfile.TemporaryDirectory(name) as data_dir: with self.assertLogs() as logs: test_binaries.create_dummy_data(data_dir) test_binaries.preprocess_translation_data(data_dir) # train epochs 1 and 2 together with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_log = get_last_log_stats_containing_string(logs.records, "train_loss") valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss") # train epoch 2, resuming from previous checkpoint 1 os.rename( os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, "checkpoint_last.pt"), ) with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_res_log = get_last_log_stats_containing_string( logs.records, "train_loss" ) valid_res_log = get_last_log_stats_containing_string( logs.records, "valid_loss" ) for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]: self.assertAlmostEqual( float(train_log[k]), float(train_res_log[k]), delta=delta ) for k in [ "valid_loss", "valid_ppl", "valid_num_updates", "valid_best_loss", ]: self.assertAlmostEqual( float(valid_log[k]), float(valid_res_log[k]), delta=delta ) def test_reproducibility(self): self._test_reproducibility("test_reproducibility") @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_fp16(self): self._test_reproducibility( "test_reproducibility_fp16", [ "--fp16", "--fp16-init-scale", "4096", ], delta=0.011, ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_memory_efficient_fp16(self): self._test_reproducibility( "test_reproducibility_memory_efficient_fp16", [ "--memory-efficient-fp16", "--fp16-init-scale", "4096", ], ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_amp(self): self._test_reproducibility( "test_reproducibility_amp", [ "--amp", "--fp16-init-scale", "4096", ], delta=0.011, ) def test_mid_epoch_reproducibility(self): self._test_reproducibility( "test_mid_epoch_reproducibility", ["--save-interval-updates", "3"], resume_checkpoint="checkpoint_1_3.pt", max_epoch=1, ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_reproducibility.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder


class TestCharacterTokenEmbedder(unittest.TestCase):
    def test_character_token_embedder(self):
        vocab = Dictionary()
        vocab.add_symbol("hello")
        vocab.add_symbol("there")

        embedder = CharacterTokenEmbedder(
            vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2
        )

        test_sents = [["hello", "unk", "there"], ["there"], ["hello", "there"]]
        max_len = max(len(s) for s in test_sents)
        input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
        for i in range(len(test_sents)):
            input[i][0] = vocab.eos()
            for j in range(len(test_sents[i])):
                input[i][j + 1] = vocab.index(test_sents[i][j])
            input[i][j + 2] = vocab.eos()
        embs = embedder(input)

        assert embs.size() == (len(test_sents), max_len + 2, 5)
        self.assertAlmostEqual(embs[0][0], embs[1][0])
        self.assertAlmostEqual(embs[0][0], embs[0][-1])
        self.assertAlmostEqual(embs[0][1], embs[2][1])
        self.assertAlmostEqual(embs[0][3], embs[1][1])

        embs.sum().backward()
        assert embedder.char_embeddings.weight.grad is not None

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import collections
import os
import shutil
import tempfile
import unittest

import numpy as np
import torch
from scripts.average_checkpoints import average_checkpoints
from torch import nn


class ModelWithSharedParameter(nn.Module):
    def __init__(self):
        super(ModelWithSharedParameter, self).__init__()
        self.embedding = nn.Embedding(1000, 200)
        self.FC1 = nn.Linear(200, 200)
        self.FC2 = nn.Linear(200, 200)
        # tie weight in FC2 to FC1
        self.FC2.weight = nn.Parameter(self.FC1.weight)
        self.FC2.bias = nn.Parameter(self.FC1.bias)

        self.relu = nn.ReLU()

    def forward(self, input):
        # use the self.relu module defined above (the attribute is lowercase)
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)


class TestAverageCheckpoints(unittest.TestCase):
    def test_average_checkpoints(self):
        params_0 = collections.OrderedDict(
            [
                ("a", torch.DoubleTensor([100.0])),
                ("b", torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])),
                ("c", torch.IntTensor([7, 8, 9])),
            ]
        )
        params_1 = collections.OrderedDict(
            [
                ("a", torch.DoubleTensor([1.0])),
                ("b", torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
                ("c", torch.IntTensor([2, 2, 2])),
            ]
        )
        params_avg = collections.OrderedDict(
            [
                ("a", torch.DoubleTensor([50.5])),
                ("b", torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])),
                # We expect truncation for integer division
                ("c", torch.IntTensor([4, 5, 5])),
            ]
        )

        fd_0, path_0 = tempfile.mkstemp()
        fd_1, path_1 = tempfile.mkstemp()
        torch.save(collections.OrderedDict([("model", params_0)]), path_0)
        torch.save(collections.OrderedDict([("model", params_1)]), path_1)

        output = average_checkpoints([path_0, path_1])["model"]

        os.close(fd_0)
        os.remove(path_0)
        os.close(fd_1)
        os.remove(path_1)

        for (k_expected, v_expected), (k_out, v_out) in zip(
            params_avg.items(), output.items()
        ):
            self.assertEqual(
                k_expected,
                k_out,
                "Key mismatch - expected {} but found {}. "
                "(Expected list of keys: {} vs actual list of keys: {})".format(
                    k_expected, k_out, params_avg.keys(), output.keys()
                ),
            )
            np.testing.assert_allclose(
                v_expected.numpy(),
                v_out.numpy(),
                err_msg="Tensor value mismatch for key {}".format(k_expected),
            )

    def test_average_checkpoints_with_shared_parameters(self):
        def _construct_model_with_shared_parameters(path, value):
            m = ModelWithSharedParameter()
            nn.init.constant_(m.FC1.weight, value)
            torch.save({"model": m.state_dict()}, path)
            return m

        tmpdir = tempfile.mkdtemp()
        paths = []

        path = os.path.join(tmpdir, "m1.pt")
        m1 = _construct_model_with_shared_parameters(path, 1.0)
        paths.append(path)

        path = os.path.join(tmpdir, "m2.pt")
        m2 = _construct_model_with_shared_parameters(path, 2.0)
        paths.append(path)

        path = os.path.join(tmpdir, "m3.pt")
        m3 = _construct_model_with_shared_parameters(path, 3.0)
        paths.append(path)

        new_model = average_checkpoints(paths)
        self.assertTrue(
            torch.equal(
                new_model["model"]["embedding.weight"],
                (m1.embedding.weight + m2.embedding.weight + m3.embedding.weight)
                / 3.0,
            )
        )

        self.assertTrue(
            torch.equal(
                new_model["model"]["FC1.weight"],
                (m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0,
            )
        )

        self.assertTrue(
            torch.equal(
                new_model["model"]["FC2.weight"],
                (m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0,
            )
        )

        shutil.rmtree(tmpdir)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_average_checkpoints.py
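The averaging behaviour checked above (float parameters averaged exactly, integer parameters truncated by integer division) can be sketched as follows. This is an illustration of the idea only, not the scripts.average_checkpoints implementation, and average_model_states is a hypothetical helper name.

import collections

import torch


def average_model_states(paths):
    # Sum the "model" state dicts of every checkpoint, then divide by the count.
    summed, num = collections.OrderedDict(), len(paths)
    for path in paths:
        state = torch.load(path, map_location="cpu")["model"]
        for key, value in state.items():
            summed[key] = summed.get(key, 0) + value

    averaged = collections.OrderedDict()
    for key, total in summed.items():
        if total.is_floating_point():
            averaged[key] = total / num
        else:
            averaged[key] = total // num  # truncating division for integer tensors
    return averaged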
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.modules import multihead_attention, sinusoidal_positional_embedding from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ Return a dummy task and argument parser, which can be used to create a model/criterion. """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def _test_save_and_load(scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) class TestExportModels(unittest.TestCase): def test_export_multihead_attention(self): module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) scripted = torch.jit.script(module) _test_save_and_load(scripted) def test_incremental_state_multihead_attention(self): module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module1 = torch.jit.script(module1) module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module2 = torch.jit.script(module2) state = {} state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])}) state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])}) v1 = module1.get_incremental_state(state, "key")["a"] v2 = module2.get_incremental_state(state, "key")["a"] self.assertEqual(v1, 1) self.assertEqual(v2, 2) def test_positional_embedding(self): module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding( embedding_dim=8, padding_idx=1 ) scripted = torch.jit.script(module) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer_no_token_pos_emb(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) args.no_token_positional_embeddings = True model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_export.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch import torch.nn as nn from fairseq.modules.checkpoint_activations import checkpoint_wrapper from torch.utils.checkpoint import checkpoint class Model(nn.Module): def __init__( self, use_pytorch_checkpoint=False, use_fairseq_checkpoint=False, **kwargs ): super().__init__() torch.manual_seed(0) self.use_pytorch_checkpoint = use_pytorch_checkpoint self.ffn = nn.Sequential( nn.Linear(32, 128), # add a Dropout layer to test RNG save/restore nn.Dropout(p=0.5), nn.Linear(128, 32), ) if use_fairseq_checkpoint: self.ffn = checkpoint_wrapper(self.ffn, **kwargs) self.out = nn.Linear(32, 1) def forward(self, x): if self.use_pytorch_checkpoint: x = checkpoint(self.ffn, x) else: x = self.ffn(x) return self.out(x) class TestActivationCheckpointing(unittest.TestCase): def _test_checkpoint_wrapper(self, device, log_memory_usage=False): def get_loss_and_gnorm(model): torch.manual_seed(1) input = torch.rand(2, 16, 32).requires_grad_(True).to(device) model.zero_grad() loss = model(input).sum() loss.backward() gnorm = torch.norm( torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]) ) return {"loss": loss, "gnorm": gnorm} model = Model().to(device) no_cpt = get_loss_and_gnorm(model) model = Model(use_pytorch_checkpoint=True).to(device) pyt_cpt = get_loss_and_gnorm(model) torch.testing.assert_allclose(no_cpt["loss"], pyt_cpt["loss"]) torch.testing.assert_allclose(no_cpt["gnorm"], pyt_cpt["gnorm"]) model = Model(use_fairseq_checkpoint=True).to(device) fairseq_cpt = get_loss_and_gnorm(model) torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt["loss"]) torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt["gnorm"]) model = Model(use_fairseq_checkpoint=True, offload_to_cpu=True).to(device) fairseq_cpt_offload = get_loss_and_gnorm(model) torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt_offload["loss"]) torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt_offload["gnorm"]) def test_checkpoint_wrapper_cpu(self): self._test_checkpoint_wrapper(device=torch.device("cpu")) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_checkpoint_wrapper_cuda(self): self._test_checkpoint_wrapper(device=torch.device("cuda")) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/test_activation_checkpointing.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import OrderedDict import numpy as np import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset from tests.test_train import mock_dict class TestMultiCorpusSampledDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([1]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([2]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def _test_sample_helper( self, expected_sample_from_first_ds_percentage, num_samples=1000, sampling_func=None, ): # To make sure test is not flaky np.random.seed(0) if sampling_func is None: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), ) else: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), sampling_func=sampling_func, ) m.ordered_indices() count_sample_from_first_dataset = 0 for _ in range(num_samples): if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1: count_sample_from_first_dataset += 1 sample_from_first_ds_percentage = ( 1.0 * count_sample_from_first_dataset / num_samples ) self.assertLess( abs( sample_from_first_ds_percentage - expected_sample_from_first_ds_percentage ), 0.01, ) def test_multi_corpus_sampled_dataset_uniform_sample(self): self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5) def test_multi_corpus_sampled_dataset_weighted_sample(self): def naive_weighted_sample(weights): def f(input): v = np.random.random() agg = 0 for i, weight in enumerate(weights): agg += weight if agg > v: return i return f self._test_sample_helper( expected_sample_from_first_ds_percentage=0.9, sampling_func=naive_weighted_sample(weights=[0.9, 0.1]), )
EXA-1-master
exa/libraries/fairseq/tests/test_multi_corpus_sampled_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import unittest from tempfile import TemporaryDirectory from fairseq.binarizer import FileBinarizer, VocabularyDatasetBinarizer from fairseq.tasks.masked_lm import MaskedLMConfig, MaskedLMTask from tests.utils import build_vocab, make_data class TestMaskedLM(unittest.TestCase): def test_masks_tokens(self): with TemporaryDirectory() as dirname: # prep input file raw_file = os.path.join(dirname, "raw") data = make_data(out_file=raw_file) vocab = build_vocab(data) # binarize binarizer = VocabularyDatasetBinarizer(vocab, append_eos=False) split = "train" bin_file = os.path.join(dirname, split) FileBinarizer.multiprocess_dataset( input_file=raw_file, binarizer=binarizer, dataset_impl="mmap", vocab_size=len(vocab), output_prefix=bin_file, ) # setup task cfg = MaskedLMConfig( data=dirname, seed=42, mask_prob=0.5, # increasing the odds of masking random_token_prob=0, # avoiding random tokens for exact match leave_unmasked_prob=0, # always masking for exact match ) task = MaskedLMTask(cfg, binarizer.dict) original_dataset = task._load_dataset_split(bin_file, 1, False) # load datasets task.load_dataset(split) masked_dataset = task.dataset(split) mask_index = task.source_dictionary.index("<mask>") iterator = task.get_batch_iterator( dataset=masked_dataset, max_tokens=65_536, max_positions=4_096, ).next_epoch_itr(shuffle=False) for batch in iterator: for sample in range(len(batch)): net_input = batch["net_input"] masked_src_tokens = net_input["src_tokens"][sample] masked_src_length = net_input["src_lengths"][sample] masked_tgt_tokens = batch["target"][sample] sample_id = batch["id"][sample] original_tokens = original_dataset[sample_id] original_tokens = original_tokens.masked_select( masked_src_tokens[:masked_src_length] == mask_index ) masked_tokens = masked_tgt_tokens.masked_select( masked_tgt_tokens != task.source_dictionary.pad() ) assert masked_tokens.equal(original_tokens) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/tasks/test_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import unittest from tempfile import TemporaryDirectory from fairseq import options from fairseq.binarizer import FileBinarizer, VocabularyDatasetBinarizer from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks.denoising import DenoisingTask from tests.utils import build_vocab, make_data class TestDenoising(unittest.TestCase): def test_denoising(self): with TemporaryDirectory() as dirname: # prep input file raw_file = os.path.join(dirname, "raw") data = make_data(out_file=raw_file) vocab = build_vocab(data) # binarize binarizer = VocabularyDatasetBinarizer(vocab, append_eos=False) split = "train" bin_file = os.path.join(dirname, split) dataset_impl = "mmap" FileBinarizer.multiprocess_dataset( input_file=raw_file, binarizer=binarizer, dataset_impl=dataset_impl, vocab_size=len(vocab), output_prefix=bin_file, ) # setup task train_args = options.parse_args_and_arch( options.get_training_parser(), [ "--task", "denoising", "--arch", "bart_base", "--seed", "42", "--mask-length", "word", "--permute-sentences", "1", "--rotate", "0", "--replace-length", "-1", "--mask", "0.2", dirname, ], ) cfg = convert_namespace_to_omegaconf(train_args) task = DenoisingTask(cfg.task, binarizer.dict) # load datasets original_dataset = task._load_dataset_split(bin_file, 1, False) task.load_dataset(split) masked_dataset = task.dataset(split) iterator = task.get_batch_iterator( dataset=masked_dataset, max_tokens=65_536, max_positions=4_096, ).next_epoch_itr(shuffle=False) mask_index = task.source_dictionary.index("<mask>") for batch in iterator: for sample in range(len(batch)): net_input = batch["net_input"] masked_src_tokens = net_input["src_tokens"][sample] masked_src_length = net_input["src_lengths"][sample] masked_tgt_tokens = batch["target"][sample] sample_id = batch["id"][sample] original_tokens = original_dataset[sample_id] original_tokens = original_tokens.masked_select( masked_src_tokens[:masked_src_length] == mask_index ) masked_tokens = masked_tgt_tokens.masked_select( masked_src_tokens == mask_index ) assert masked_tokens.equal(original_tokens) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/tasks/test_denoising.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import unittest from tempfile import TemporaryDirectory from fairseq import options from fairseq.binarizer import FileBinarizer, VocabularyDatasetBinarizer from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks.multilingual_denoising import MultilingualDenoisingTask from tests.utils import build_vocab, make_data class TestMultilingualDenoising(unittest.TestCase): def test_multilingual_denoising(self): with TemporaryDirectory() as dirname: # prep input file lang_dir = os.path.join(dirname, "en") os.mkdir(lang_dir) raw_file = os.path.join(lang_dir, "raw") data = make_data(out_file=raw_file) vocab = build_vocab(data) # binarize binarizer = VocabularyDatasetBinarizer(vocab, append_eos=False) split = "train" bin_file = os.path.join(lang_dir, split) dataset_impl = "mmap" FileBinarizer.multiprocess_dataset( input_file=raw_file, binarizer=binarizer, dataset_impl=dataset_impl, vocab_size=len(vocab), output_prefix=bin_file, ) # setup task train_args = options.parse_args_and_arch( options.get_training_parser(), [ "--task", "multilingual_denoising", "--arch", "bart_base", "--seed", "42", "--mask-length", "word", "--permute-sentences", "1", "--rotate", "0", "--replace-length", "-1", "--mask", "0.2", dirname, ], ) cfg = convert_namespace_to_omegaconf(train_args) task = MultilingualDenoisingTask(cfg.task, binarizer.dict) # load datasets original_dataset = task._load_dataset_split(bin_file, 1, False) task.load_dataset(split) masked_dataset = task.dataset(split) iterator = task.get_batch_iterator( dataset=masked_dataset, max_tokens=65_536, max_positions=4_096, ).next_epoch_itr(shuffle=False) mask_index = task.source_dictionary.index("<mask>") for batch in iterator: for sample in range(len(batch)): net_input = batch["net_input"] masked_src_tokens = net_input["src_tokens"][sample] masked_src_length = net_input["src_lengths"][sample] masked_tgt_tokens = batch["target"][sample] sample_id = batch["id"][sample] original_tokens = original_dataset[sample_id] original_tokens = original_tokens.masked_select( masked_src_tokens[:masked_src_length] == mask_index ) masked_tokens = masked_tgt_tokens.masked_select( masked_src_tokens == mask_index ) assert masked_tokens.equal(original_tokens) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/tasks/test_multilingual_denoising.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import unittest from tempfile import TemporaryDirectory from fairseq import options from fairseq.binarizer import FileBinarizer, VocabularyDatasetBinarizer from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks.span_masked_lm import SpanMaskedLMTask from tests.utils import build_vocab, make_data class TestSpanMaskedLM(unittest.TestCase): def test_masks_token_spans(self): with TemporaryDirectory() as dirname: # prep input file raw_file = os.path.join(dirname, "raw") data = make_data(out_file=raw_file) vocab = build_vocab(data) # binarize binarizer = VocabularyDatasetBinarizer(vocab, append_eos=False) split = "train" bin_file = os.path.join(dirname, split) dataset_impl = "mmap" FileBinarizer.multiprocess_dataset( input_file=raw_file, binarizer=binarizer, dataset_impl=dataset_impl, vocab_size=len(vocab), output_prefix=bin_file, ) # adding sentinel tokens for i in range(100): vocab.add_symbol(f"<extra_id_{i}>") # setup task train_args = options.parse_args_and_arch( options.get_training_parser(), [ "--task", "span_masked_lm", "--arch", "bart_base", "--seed", "42", dirname, ], ) cfg = convert_namespace_to_omegaconf(train_args) task = SpanMaskedLMTask(cfg.task, binarizer.dict) # load datasets original_dataset = task._load_dataset_split(bin_file, 1, False) task.load_dataset(split) masked_dataset = task.dataset(split) iterator = task.get_batch_iterator( dataset=masked_dataset, max_tokens=65_536, max_positions=4_096, ).next_epoch_itr(shuffle=False) num_tokens = len(vocab) for batch in iterator: for sample in range(len(batch)): sample_id = batch["id"][sample] original_tokens = original_dataset[sample_id] masked_src_tokens = batch["net_input"]["src_tokens"][sample] masked_src_length = batch["net_input"]["src_lengths"][sample] masked_tgt_tokens = batch["target"][sample] original_offset = 0 masked_tgt_offset = 0 extra_id_token = len(vocab) - 1 for masked_src_token in masked_src_tokens[:masked_src_length]: if masked_src_token == extra_id_token: assert ( masked_src_token == masked_tgt_tokens[masked_tgt_offset] ) extra_id_token -= 1 masked_tgt_offset += 1 while ( original_offset < len(original_tokens) and masked_tgt_tokens[masked_tgt_offset] != extra_id_token ): assert ( original_tokens[original_offset] == masked_tgt_tokens[masked_tgt_offset] ) original_offset += 1 masked_tgt_offset += 1 else: assert original_tokens[original_offset] == masked_src_token original_offset += 1 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/tasks/test_span_masked_lm.py
EXA-1-master
exa/libraries/fairseq/tests/gpu/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from copy import deepcopy from dataclasses import dataclass from typing import Optional import torch from fairseq.models.ema import EMA class DummyModule(torch.nn.Module): def __init__(self) -> None: """LightningModule for testing purposes Args: epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum validation loss for testing purposes (zero based). If None this is ignored. Defaults to None. """ super().__init__() self.layer = torch.nn.Linear(in_features=32, out_features=2) self.another_layer = torch.nn.Linear(in_features=2, out_features=2) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.layer(x) return self.another_layer(x) @dataclass class EMAConfig(object): ema_decay: float = 0.99 ema_start_update: int = 0 ema_fp32: bool = False ema_seed_model: Optional[str] = None ema_update_freq: int = 1 @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestEMAGPU(unittest.TestCase): def assertTorchAllClose(self, x, y, atol=1e-8, rtol=1e-5, msg=None): diff = x.float() - y.float() diff_norm = torch.norm(diff) other_norm = torch.norm(y.float()) if msg is None: msg = "|input - other| > {} + {} * |other|".format(atol, rtol) self.assertLessEqual( diff_norm, atol + rtol * other_norm, msg=msg, ) def test_ema(self): model = DummyModule().cuda() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig() ema = EMA(model, config) # set decay ema._set_decay(config.ema_decay) self.assertEqual(ema.get_decay(), config.ema_decay) # get model self.assertEqual(ema.get_model(), ema.model) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # EMA step x = torch.randn(32).cuda() y = model(x) loss = y.sum() loss.backward() optimizer.step() ema.step(model) ema_state_dict = ema.get_model().state_dict() for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema_state_dict[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # Load EMA into model model2 = DummyModule().cuda() ema.reverse(model2) for key, param in model2.state_dict().items(): ema_param = ema_state_dict[key] self.assertTrue(torch.allclose(ema_param, param)) def test_ema_fp32(self): model = DummyModule().cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=True) ema = EMA(model, config) x = torch.randn(32).cuda() y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertIn(key, ema.fp32_params) # EMA update is done in fp32, and hence the EMA param must be # closer to the EMA update done in fp32 than in fp16. 
self.assertLessEqual( torch.norm( ema_param.float() - ( config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() ) .half() .float() ), torch.norm( ema_param.float() - ( config.ema_decay * prev_param + (1 - config.ema_decay) * param ).float() ), ) self.assertTorchAllClose( ema_param, ( config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() ).half(), ) def test_ema_fp16(self): model = DummyModule().cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=False) ema = EMA(model, config) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) x = torch.randn(32).cuda() y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue # EMA update is done in fp16, and hence the EMA param must be # closer to the EMA update done in fp16 than in fp32. self.assertLessEqual( torch.norm( ema_param.float() - ( config.ema_decay * prev_param + (1 - config.ema_decay) * param ).float() ), torch.norm( ema_param.float() - ( config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() ) .half() .float() ), ) self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/gpu/test_ema_gpu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import json import logging import os import tempfile import unittest from io import StringIO import torch from fairseq import options from fairseq_cli import train from tests.utils import ( create_dummy_data, generate_main, preprocess_lm_data, preprocess_translation_data, train_language_model, train_translation_model, ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestMultiGPU(unittest.TestCase): @staticmethod def parse_logs(logfile): logs = [] for ln in open(logfile, "r").readlines(): try: logs.append(json.loads(ln)) except json.JSONDecodeError: continue return logs @property def world_size(self): return torch.cuda.device_count() def train_flags(self, mu): return [ "--memory-efficient-fp16", "--update-freq", "1", "--seed", "1", "--log-format", "json", "--max-update", str(mu), "--tokens-per-sample", "20", "--batch-size", "2", "--share-decoder-input-output-embed", "--optimizer", "adam", "--max-valid-steps", "1", "--pad-to-fixed-length", "--sample-break-mode", "none", ] def _test_resume_multilingual_training( self, extra_clargs, arch="transformer_lm_gpt2_tiny" ): languages = ["en_XX", "fr_XX", "zh_CN"] save_interval = 5 mu = 10 flags = ( self.train_flags(mu) + ["--save-interval-updates", str(save_interval), "--log-interval", "1"] + extra_clargs ) with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fp16") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data( data_dir, num_examples=int( mu * 20 * self.world_size * 1.5 ), # make sure enough data for max updates languages=languages, ) preprocess_lm_data(data_dir, languages) train_language_model( data_dir, arch, flags + ["--log-file", log], task="multilingual_language_modeling", world_size=self.world_size, ) log2 = os.path.join(data_dir, "resume.log") ckpt_name = f"checkpoint_1_{save_interval}.pt" restore_file = os.path.join(data_dir, ckpt_name) train_language_model( data_dir, arch, flags + ["--log-file", log2, "--restore-file", restore_file, "--no-save"], task="multilingual_language_modeling", world_size=self.world_size, ) l1 = self.parse_logs(log) assert ( int(l1[-1]["train_num_updates"]) == mu ), f"The first run did not complete {mu} updates. Add more data" l2 = self.parse_logs(log2) if int(l2[0]["num_updates"]) != save_interval + 1: all_ckpt_files = [ x for x in os.listdir(data_dir) if x.endswith(".pt") ] import shutil shutil.move(data_dir, "last_failed_resume") raise AssertionError( f"Likely failed to load {ckpt_name}. {all_ckpt_files} \n LOGS: {l1} \n\n {l2}. " ) for k in [ "train_loss", "train_num_updates", "train_ppl", "train_gnorm", ]: from_scratch, resumed = float(l1[-1][k]), float(l2[-1][k]) # This fails without rounding! 
assert ( from_scratch == resumed ), f"difference at {k} {from_scratch} != {resumed}" @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestTranslationGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fp16_multigpu(self): self._test_multigpu("test_fp16", ["--fp16"]) def test_slowmo_multigpu(self): self._test_multigpu( "test_slowmo", ["--ddp-backend", "slowmo", "--nprocs-per-node", "1"] ) def test_slowmo_single_node_multigpu(self): self._test_multigpu( "test_slowmo_single_node", ["--ddp-backend", "slowmo", "--nprocs-per-node", "2"], ) def _test_multigpu(self, test_name, test_args): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory(test_name) as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", test_args + ["--log-file", log], world_size=min(torch.cuda.device_count(), 2), ) generate_main(data_dir) assert os.path.exists(log) @staticmethod def parse_logs(logfile): logs = [] for ln in open(logfile, "r").readlines(): try: logs.append(json.loads(ln)) except json.JSONDecodeError: continue return logs def test_resume_training_fsdp(self): self._test_resume_training(["--ddp-backend", "fully_sharded"]) def test_resume_training_fsdp_sharded_state(self): self._test_resume_training( ["--ddp-backend", "fully_sharded", "--use-sharded-state"] ) def test_resume_training_noc10d(self): self._test_resume_training([]) def _test_resume_training(self, extra_clargs, arch="fconv_iwslt_de_en"): flags = [ "--fp16", "--log-format", "json", "--max-update", "10", "--save-interval-updates", "2", "--log-interval", "1", ] + extra_clargs world_size = min(torch.cuda.device_count(), 2) with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fp16") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch, flags + ["--log-file", log], world_size=world_size, ) log2 = os.path.join(data_dir, "resume.log") restore_file = os.path.join(data_dir, "checkpoint_1_2.pt") train_translation_model( data_dir, arch, flags + ["--log-file", log2, "--restore-file", restore_file], world_size=world_size, ) l1 = self.parse_logs(log) l2 = self.parse_logs(log2) assert int(l2[0]["num_updates"]) == 3, f"{l1}\n\n {l2}" for k in [ "train_loss", "train_num_updates", "train_ppl", "train_gnorm", ]: from_scratch, resumed = l1[-1][k], l2[-1][k] assert ( from_scratch == resumed ), f"difference at {k} {from_scratch} != {resumed}" def test_memory_efficient_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_memory_efficient_fp16") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--memory-efficient-fp16"] ) generate_main(data_dir) def test_transformer_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "64", "--decoder-embed-dim", "64", "--fp16", ], run_validation=True, ) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def 
test_amp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_amp") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en", ["--amp"]) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_transformer_amp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "64", "--decoder-embed-dim", "64", "--amp", ], run_validation=True, ) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_levenshtein_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_levenshtein_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "levenshtein_transformer", [ "--apply-bert-init", "--early-exit", "6,6,6", "--criterion", "nat_loss", ], task="translation_lev", ) gen_config = [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ] # non-ensemble generation generate_main(data_dir, gen_config) # ensemble generation generate_main( data_dir, gen_config, path=os.pathsep.join( [ os.path.join(data_dir, "checkpoint_last.pt"), os.path.join(data_dir, "checkpoint_last.pt"), ] ), ) def test_fsdp_checkpoint_generate(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fsdp_sharded") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) world_size = min(torch.cuda.device_count(), 2) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--log-file", log, "--ddp-backend", "fully_sharded"], world_size=world_size, ) generate_main(data_dir) assert os.path.exists(log) def test_fsdp_sharded_checkpoint_generate(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fsdp_sharded") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) world_size = min(torch.cuda.device_count(), 2) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--log-file", log, "--ddp-backend", "fully_sharded", "--use-sharded-state", ], world_size=world_size, ) generate_main(data_dir, ["--checkpoint-shard-count", str(world_size)]) assert os.path.exists(log) def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) # try scalar quantization scalar_quant_train_parser = options.get_training_parser() scalar_quant_train_args = options.parse_args_and_arch( scalar_quant_train_parser, [ "--task", "language_modeling", data_dir, "--arch", 
arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-update", "3", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", "--quant-noise-scalar", "0.5", ] + (extra_flags or []), ) train.main(scalar_quant_train_args) # try iterative PQ quantization quantize_parser = options.get_training_parser() quantize_args = options.parse_args_and_arch( quantize_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "50", "--tokens-per-sample", "50", "--max-update", "6", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", "--restore-file", os.path.join(data_dir, "checkpoint_last.pt"), "--reset-optimizer", "--quantization-config-path", os.path.join( os.path.dirname(__file__), "transformer_quantization_config.yaml" ), ] + (extra_flags or []), ) train.main(quantize_args) @unittest.skipIf( int(torch.__version__[2]) < 10, reason="quantized kernels are only supported on CPU" ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestQuantization(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_quantization(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_quantization") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) # tests both scalar and iterative PQ quantization _quantize_language_model(data_dir, "transformer_lm") @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestOptimizersGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_flat_grads(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_flat_grads") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) with self.assertRaises(RuntimeError): # adafactor isn't compatible with flat grads, which # are used by default with --fp16 train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", ], ) # but it should pass once we set --fp16-no-flatten-grads train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", "--fp16-no-flatten-grads", ], ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/gpu/test_binaries_gpu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from torch import nn from fairseq.distributed import ModuleProxyWrapper from .utils import objects_are_equal class MockDDPWrapper(nn.Module): """A simple wrapper with an interface similar to DistributedDataParallel.""" def __init__(self, module): super().__init__() self.module = module def forward(self, x): return self.module(x) class Model(nn.Module): def __init__(self): super().__init__() self.linear = nn.Linear(5, 10) self.xyz = "hello" def forward(self, x): return self.linear(x) def get_xyz(self): return self.xyz class TestModuleProxyWrapper(unittest.TestCase): def _get_module(self): module = Model() wrapped_module = MockDDPWrapper(module) wrapped_module = ModuleProxyWrapper(wrapped_module) return wrapped_module, module def test_getattr_forwarding(self): wrapped_module, module = self._get_module() assert module.xyz == "hello" assert module.get_xyz() == "hello" assert wrapped_module.xyz == "hello" wrapped_module.xyz = "world" assert wrapped_module.xyz == "world" assert module.get_xyz() == "hello" def test_state_dict(self): wrapped_module, module = self._get_module() assert objects_are_equal(wrapped_module.state_dict(), module.state_dict()) def test_load_state_dict(self): wrapped_module, module = self._get_module() wrapped_module.load_state_dict(module.state_dict()) input = torch.rand(4, 5) torch.testing.assert_allclose(wrapped_module(input), module(input)) def test_forward(self): wrapped_module, module = self._get_module() input = torch.rand(4, 5) torch.testing.assert_allclose(wrapped_module(input), module(input)) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/distributed/test_module_proxy_wrapper.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import functools import random import unittest from multiprocessing import Manager import torch import torch.nn as nn from omegaconf import OmegaConf from fairseq import optim from fairseq.distributed import utils as distributed_utils class Model(nn.Module): def __init__(self, input_size, output_size): super(Model, self).__init__() self.fc = nn.Linear(input_size, output_size) def forward(self, input): output = self.fc(input) return output def setup_model_loss_criterion(cfg, args, rank, is_cuda): """ setup model, criterion and optimizer based on input args """ args.distributed_rank = rank cfg.distributed_training.distributed_rank = args.distributed_rank if cfg.distributed_training.distributed_world_size > 1: distributed_utils.distributed_init(cfg) torch.manual_seed(1) model = Model(args.input_size, args.nb_classes) loss_fn = nn.CrossEntropyLoss() if is_cuda: model = model.cuda() loss_fn = loss_fn.cuda() optimizer = optim.sgd.SGD(args, model.parameters()) optimizer = optim.FairseqBMUF(cfg=cfg.bmuf, optimizer=optimizer) return model, loss_fn, optimizer def train_step(input, target, model, loss_fn, optimizer, **unused): """Do forward, backward and parameter update.""" model.train() output = model(input) loss = loss_fn(output, target) optimizer.backward(loss) optimizer.step() def single_gpu_training(cfg, args, rank, iterations, shared_results): is_cuda = torch.cuda.is_available() if is_cuda: torch.cuda.set_device(rank) model, loss_fn, optimizer = setup_model_loss_criterion(cfg, args, rank, is_cuda) for _ in range(iterations): input = torch.randn(1, args.input_size) target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes) if is_cuda: input = input.cuda() target = target.cuda() train_step(input, target, model, loss_fn, optimizer) results = [] for param in model.parameters(): if len(results) == 0: results = param.flatten().cpu().data else: results = torch.cat((results, param.flatten().cpu().data), 0) shared_results[rank] = results def setup_args(): args = argparse.Namespace() args.global_sync_iter = 20 args.block_momentum = 0.875 args.block_lr = 0.5 args.input_size = 5 args.nb_classes = 2 args.batch_size = 1 args.lr = [1e-3] args.momentum = 0 args.weight_decay = 0 args.warmup_iterations = 0 args.use_nbm = True args.average_sync = True args.global_sync_iter = 1 args.model_parallel_size = 1 args.distributed_backend = "gloo" args.distributed_world_size = 2 port = random.randint(10000, 20000) args.distributed_init_method = "tcp://localhost:{port}".format(port=port) args.distributed_init_host = "localhost" args.distributed_port = port + 1 args.local_world_size = args.distributed_world_size cfg = OmegaConf.create() cfg.optimization = OmegaConf.create() cfg.common = OmegaConf.create() cfg.distributed_training = OmegaConf.create() cfg.dataset = OmegaConf.create() cfg.bmuf = OmegaConf.create() cfg.optimizer = OmegaConf.create() cfg.bmuf.global_sync_iter = args.global_sync_iter cfg.bmuf.block_momentum = args.block_momentum cfg.bmuf.block_lr = args.block_lr cfg.dataset.batch_size = args.batch_size cfg.optimization.lr = args.lr cfg.optimizer.momentum = args.momentum cfg.optimizer.weight_decay = args.weight_decay cfg.bmuf.warmup_iterations = args.warmup_iterations cfg.bmuf.use_nbm = args.use_nbm cfg.bmuf.average_sync = args.average_sync cfg.common.model_parallel_size = args.model_parallel_size 
cfg.distributed_training.distributed_backend = args.distributed_backend cfg.distributed_training.distributed_world_size = args.distributed_world_size cfg.bmuf.distributed_world_size = args.distributed_world_size cfg.distributed_training.distributed_init_method = args.distributed_init_method cfg.distributed_training.distributed_port = args.distributed_port return cfg, args @unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs") class TestBMUF(unittest.TestCase): def bmuf_process(self, cfg, args, iterations): results = Manager().dict() torch.multiprocessing.spawn( fn=functools.partial(single_gpu_training, cfg, args), args=(iterations, results), nprocs=args.distributed_world_size, join=True, ) return results def test_bmuf_sync(self): # Train model for 1 iteration and do bmuf sync without doing warmup cfg, args = setup_args() iterations = 1 results = self.bmuf_process(cfg, args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_warmup_sync(self): # Train model for 20 iteration and do warmup sync without doing bmuf sync cfg, args = setup_args() args.warmup_iterations = 20 cfg.bmuf.warmup_iterations = args.warmup_iterations iterations = 20 results = self.bmuf_process(cfg, args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_warmup_sync_bmuf_sync(self): # Train model for 25 iteration and do warmup sync after 20 iteration # and bmuf sync after 25 iteration cfg, args = setup_args() args.warmup_iterations = 20 args.global_sync_iter = 5 cfg.bmuf.warmup_iterations = args.warmup_iterations cfg.bmuf.global_sync_iter = args.global_sync_iter iterations = 25 results = self.bmuf_process(cfg, args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_single_gpu_bmuf(self): # Train model for 5 iterations and use GPU 1 cfg, args = setup_args() args.distributed_world_size = 1 args.warmup_iterations = 5 cfg.distributed_training.distributed_world_size = args.distributed_world_size cfg.bmuf.distributed_world_size = args.distributed_world_size cfg.bmuf.warmup_iterations = args.warmup_iterations iterations = 20 results = self.bmuf_process(cfg, args, iterations) assert len(results) == 1 def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/distributed/test_bmuf.py
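The assertAlmostEqual check in test_bmuf.py above compares the flattened parameter vectors gathered from each worker. A minimal sketch of that flatten-and-compare pattern, assuming only plain PyTorch (no GPUs or fairseq needed); the two identically seeded linear models are illustrative:

import torch
import torch.nn as nn


def flatten_params(model: nn.Module) -> torch.Tensor:
    # Concatenate every parameter into a single 1-D CPU tensor, the same
    # layout single_gpu_training builds before storing it in shared_results.
    return torch.cat([p.detach().flatten().cpu() for p in model.parameters()])


if __name__ == "__main__":
    torch.manual_seed(1)
    model_a = nn.Linear(5, 2)
    torch.manual_seed(1)
    model_b = nn.Linear(5, 2)  # same seed, so parameters start identical
    max_diff = (flatten_params(model_a) - flatten_params(model_b)).abs().max()
    assert max_diff < 1e-4, f"parameters diverged by {max_diff}"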
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import sys import unittest import torch from fairseq.distributed import utils as dist_utils from .utils import objects_are_equal, spawn_and_init class DistributedTest(unittest.TestCase): def setUp(self): if not torch.cuda.is_available(): raise unittest.SkipTest("CUDA not available, skipping test") if sys.platform == "win32": raise unittest.SkipTest("NCCL doesn't support Windows, skipping test") if torch.cuda.device_count() < 2: raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping") class TestBroadcastObject(DistributedTest): def test_str(self): spawn_and_init( functools.partial( TestBroadcastObject._test_broadcast_object, "hello world" ), world_size=2, ) def test_tensor(self): spawn_and_init( functools.partial( TestBroadcastObject._test_broadcast_object, torch.rand(5), ), world_size=2, ) def test_complex(self): spawn_and_init( functools.partial( TestBroadcastObject._test_broadcast_object, { "a": "1", "b": [2, torch.rand(2, 3), 3], "c": (torch.rand(2, 3), 4), "d": {5, torch.rand(5)}, "e": torch.rand(5), "f": torch.rand(5).int().cuda(), }, ), world_size=2, ) @staticmethod def _test_broadcast_object(ref_obj, rank, group): obj = dist_utils.broadcast_object( ref_obj if rank == 0 else None, src_rank=0, group=group ) assert objects_are_equal(ref_obj, obj) class TestAllGatherList(DistributedTest): def test_str_equality(self): spawn_and_init( functools.partial( TestAllGatherList._test_all_gather_list_equality, "hello world", ), world_size=2, ) def test_tensor_equality(self): spawn_and_init( functools.partial( TestAllGatherList._test_all_gather_list_equality, torch.rand(5), ), world_size=2, ) def test_complex_equality(self): spawn_and_init( functools.partial( TestAllGatherList._test_all_gather_list_equality, { "a": "1", "b": [2, torch.rand(2, 3), 3], "c": (torch.rand(2, 3), 4), "d": {5, torch.rand(5)}, "e": torch.rand(5), "f": torch.rand(5).int(), }, ), world_size=2, ) @staticmethod def _test_all_gather_list_equality(ref_obj, rank, group): objs = dist_utils.all_gather_list(ref_obj, group) for obj in objs: assert objects_are_equal(ref_obj, obj) def test_rank_tensor(self): spawn_and_init( TestAllGatherList._test_all_gather_list_rank_tensor, world_size=2 ) @staticmethod def _test_all_gather_list_rank_tensor(rank, group): obj = torch.tensor([rank]) objs = dist_utils.all_gather_list(obj, group) for i, obj in enumerate(objs): assert obj.item() == i if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/distributed/test_utils.py
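Each test in test_utils.py above follows the same pattern: a per-rank function receives (payload, rank, group), and spawn_and_init launches it on two GPUs. A sketch of adding one more payload under the same assumptions (the fairseq test suite importable as a package from the repo root, 2+ CUDA devices available):

import functools

import torch

from fairseq.distributed import utils as dist_utils
from tests.distributed.utils import objects_are_equal, spawn_and_init


def _check_broadcast(ref_obj, rank, group):
    # Rank 0 owns the object; every other rank passes None and receives a copy.
    obj = dist_utils.broadcast_object(
        ref_obj if rank == 0 else None, src_rank=0, group=group
    )
    assert objects_are_equal(ref_obj, obj)


def run_extra_broadcast_check():
    spawn_and_init(
        functools.partial(_check_broadcast, {"ids": torch.arange(4), "tag": "extra"}),
        world_size=2,
    )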
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import signal import time import unittest import torch from torch import nn from fairseq.distributed import DistributedTimeoutWrapper class ModuleWithDelay(nn.Module): def __init__(self, delay): super().__init__() self.delay = delay def forward(self, x): time.sleep(self.delay) return x class TestDistributedTimeoutWrapper(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_no_timeout(self): module = DistributedTimeoutWrapper(ModuleWithDelay(1), 0, signal.SIGINT) module(torch.rand(5)) module.stop_timeout() def test_timeout_safe(self): module = DistributedTimeoutWrapper(ModuleWithDelay(1), 10, signal.SIGINT) module(torch.rand(5)) module.stop_timeout() def test_timeout_killed(self): with self.assertRaises(KeyboardInterrupt): module = DistributedTimeoutWrapper(ModuleWithDelay(5), 1, signal.SIGINT) module(torch.rand(5)) module.stop_timeout() if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/distributed/test_distributed_timeout_wrapper.py
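test_distributed_timeout_wrapper.py above covers the whole API surface: wrap a module, give it a timeout in seconds and a signal, call it, then stop it. The tests also show that the configured signal (SIGINT, surfacing as KeyboardInterrupt) fires when a forward call overruns the timeout, and that a timeout of 0 means no timeout at all. A usage sketch along the same lines, assuming fairseq is installed; the 1-second SlowModule and the 10-second budget mirror test_timeout_safe:

import signal
import time

import torch
from torch import nn

from fairseq.distributed import DistributedTimeoutWrapper


class SlowModule(nn.Module):
    def forward(self, x):
        time.sleep(1)  # well under the 10-second budget below
        return x


if __name__ == "__main__":
    wrapped = DistributedTimeoutWrapper(SlowModule(), 10, signal.SIGINT)
    out = wrapped(torch.rand(5))  # completes before the timeout fires
    wrapped.stop_timeout()        # always stop the timer once the module is done
    print(out.shape)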
EXA-1-master
exa/libraries/fairseq/tests/distributed/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import tempfile import torch def spawn_and_init(fn, world_size, args=None): if args is None: args = () with tempfile.NamedTemporaryFile(delete=False) as tmp_file: torch.multiprocessing.spawn( fn=functools.partial(init_and_run, fn, args), args=( world_size, tmp_file.name, ), nprocs=world_size, join=True, ) def distributed_init(rank, world_size, tmp_file): torch.distributed.init_process_group( backend="nccl", init_method="file://{}".format(tmp_file), world_size=world_size, rank=rank, ) torch.cuda.set_device(rank) def init_and_run(fn, args, rank, world_size, tmp_file): distributed_init(rank, world_size, tmp_file) group = torch.distributed.new_group() fn(rank, group, *args) def objects_are_equal(a, b) -> bool: if type(a) is not type(b): return False if isinstance(a, dict): if set(a.keys()) != set(b.keys()): return False for k in a.keys(): if not objects_are_equal(a[k], b[k]): return False return True elif isinstance(a, (list, tuple, set)): if len(a) != len(b): return False return all(objects_are_equal(x, y) for x, y in zip(a, b)) elif torch.is_tensor(a): return ( a.size() == b.size() and a.dtype == b.dtype and a.device == b.device and torch.all(a == b) ) else: return a == b
EXA-1-master
exa/libraries/fairseq/tests/distributed/utils.py
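A quick usage sketch for objects_are_equal above, assuming the fairseq repo root is on sys.path so that tests.distributed.utils is importable. The helper recurses through dicts, lists, tuples and sets, and compares tensors by size, dtype, device and values:

import torch

from tests.distributed.utils import objects_are_equal

a = {"ids": [1, torch.zeros(2, 3)], "extra": (torch.ones(4), "tag")}
b = {"ids": [1, torch.zeros(2, 3)], "extra": (torch.ones(4), "tag")}
assert objects_are_equal(a, b)

b["extra"] = (torch.ones(5), "tag")        # shape mismatch is detected
assert not objects_are_equal(a, b)

b["extra"] = (torch.ones(4).int(), "tag")  # dtype mismatch is detected too
assert not objects_are_equal(a, b)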
#!/usr/bin/env python3 import argparse import os import unittest from inspect import currentframe, getframeinfo import numpy as np import torch from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.data import data_utils as fairseq_data_utils from fairseq.data.dictionary import Dictionary from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, FairseqEncoderModel, FairseqModel, ) from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 # /////////////////////////////////////////////////////////////////////////// # utility function to setup dummy dict/task/input # /////////////////////////////////////////////////////////////////////////// def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.tgt_dict = self.dictionary @property def target_dictionary(self): return self.dictionary def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def get_dummy_input(T=100, D=80, B=5, K=100): forward_input = {} # T max sequence length # D feature vector dimension # B batch size # K target dimension size feature = torch.randn(B, T, D) # this (B, T, D) layout is just a convention, you can override it by # write your own _prepare_forward_input function src_lengths = torch.from_numpy( np.random.randint(low=1, high=T, size=B, dtype=np.int64) ) src_lengths[0] = T # make sure the maximum length matches prev_output_tokens = [] for b in range(B): token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1) tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64) prev_output_tokens.append(torch.from_numpy(tokens)) prev_output_tokens = fairseq_data_utils.collate_tokens( prev_output_tokens, pad_idx=1, eos_idx=2, left_pad=False, move_eos_to_beginning=False, ) src_lengths, sorted_order = src_lengths.sort(descending=True) forward_input["src_tokens"] = feature.index_select(0, sorted_order) forward_input["src_lengths"] = src_lengths forward_input["prev_output_tokens"] = prev_output_tokens return forward_input def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)): """ This only provides an example to generate dummy encoder output """ (T, B, D) = encoder_out_shape encoder_out = {} encoder_out["encoder_out"] = torch.from_numpy( np.random.randn(*encoder_out_shape).astype(np.float32) ) seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B)) # some dummy mask encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand( B, -1 ) >= seq_lengths.view(B, 1).expand(-1, T) encoder_out["encoder_padding_mask"].t_() # encoer_padding_mask is (T, B) tensor, with (t, b)-th element indicate # whether 
encoder_out[t, b] is valid (=0) or not (=1) return encoder_out def _current_postion_info(): cf = currentframe() frameinfo = " (at {}:{})".format( os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno ) return frameinfo def check_encoder_output(encoder_output, batch_size=None): """we expect encoder_output to be a dict with the following key/value pairs: - encoder_out: a Torch.Tensor - encoder_padding_mask: a binary Torch.Tensor """ if not isinstance(encoder_output, dict): msg = ( "FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info() ) return False, msg if "encoder_out" not in encoder_output: msg = ( "FairseqEncoderModel.forward(...) must contain encoder_out" + _current_postion_info() ) return False, msg if "encoder_padding_mask" not in encoder_output: msg = ( "FairseqEncoderModel.forward(...) must contain encoder_padding_mask" + _current_postion_info() ) return False, msg if not isinstance(encoder_output["encoder_out"], torch.Tensor): msg = "encoder_out must be a torch.Tensor" + _current_postion_info() return False, msg if encoder_output["encoder_out"].dtype != torch.float32: msg = "encoder_out must have float32 dtype" + _current_postion_info() return False, msg mask = encoder_output["encoder_padding_mask"] if mask is not None: if not isinstance(mask, torch.Tensor): msg = ( "encoder_padding_mask must be a torch.Tensor" + _current_postion_info() ) return False, msg if mask.dtype != torch.uint8 and ( not hasattr(torch, "bool") or mask.dtype != torch.bool ): msg = ( "encoder_padding_mask must have dtype of uint8" + _current_postion_info() ) return False, msg if mask.dim() != 2: msg = ( "we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)" + _current_postion_info() ) return False, msg if batch_size is not None and mask.size(1) != batch_size: msg = ( "we expect encoder_padding_mask to be a 2-d tensor, with size(1)" + " being the batch size" + _current_postion_info() ) return False, msg return True, None def check_decoder_output(decoder_output): """we expect output from a decoder is a tuple with the following constraint: - the first element is a torch.Tensor - the second element can be anything (reserved for future use) """ if not isinstance(decoder_output, tuple): msg = "FariseqDecoder output must be a tuple" + _current_postion_info() return False, msg if len(decoder_output) != 2: msg = "FairseqDecoder output must be 2-elem tuple" + _current_postion_info() return False, msg if not isinstance(decoder_output[0], torch.Tensor): msg = ( "FariseqDecoder output[0] must be a torch.Tensor" + _current_postion_info() ) return False, msg return True, None # /////////////////////////////////////////////////////////////////////////// # Base Test class # /////////////////////////////////////////////////////////////////////////// class TestBaseFairseqModelBase(unittest.TestCase): """ This class is used to facilitate writing unittest for any class derived from `BaseFairseqModel`. 
""" @classmethod def setUpClass(cls): if cls is TestBaseFairseqModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model): self.assertTrue(isinstance(model, BaseFairseqModel)) self.model = model def setupInput(self): pass def setUp(self): self.model = None self.forward_input = None pass class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase): """ base code to test FairseqEncoderDecoderModel (formally known as `FairseqModel`) must be derived from this base class """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderDecoderModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model_cls, extra_args_setters=None): self.assertTrue( issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)), msg="This class only tests for FairseqModel subclasses", ) task, parser = get_dummy_task_and_parser() model_cls.add_args(parser) args = parser.parse_args([]) if extra_args_setters is not None: for args_setter in extra_args_setters: args_setter(args) model = model_cls.build_model(args, task) self.model = model def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input def setUp(self): super().setUp() def test_forward(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) # for FairseqEncoderDecoderModel, forward returns a tuple of two # elements, the first one is a Torch.Tensor succ, msg = check_decoder_output(forward_output) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output def test_get_normalized_probs(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) logprob = self.model.get_normalized_probs(forward_output, log_probs=True) prob = self.model.get_normalized_probs(forward_output, log_probs=False) # in order for different models/criterion to play with each other # we need to know whether the logprob or prob output is batch_first # or not. We assume an additional attribute will be attached to logprob # or prob. 
If you find your code failed here, simply override # FairseqModel.get_normalized_probs, see example at # https://fburl.com/batch_first_example self.assertTrue(hasattr(logprob, "batch_first")) self.assertTrue(hasattr(prob, "batch_first")) self.assertTrue(torch.is_tensor(logprob)) self.assertTrue(torch.is_tensor(prob)) class TestFairseqEncoderModelBase(TestBaseFairseqModelBase): """ base class to test FairseqEncoderModel """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model_cls, extra_args_setters=None): self.assertTrue( issubclass(model_cls, FairseqEncoderModel), msg="This class is only used for testing FairseqEncoderModel", ) task, parser = get_dummy_task_and_parser() model_cls.add_args(parser) args = parser.parse_args([]) if extra_args_setters is not None: for args_setter in extra_args_setters: args_setter(args) model = model_cls.build_model(args, task) self.model = model def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input # get_dummy_input() is originally for s2s, here we delete extra dict # items, so it can be used for EncoderModel / Encoder as well self.forward_input.pop("prev_output_tokens", None) def setUp(self): super().setUp() def test_forward(self): if self.forward_input and self.model: bsz = self.forward_input["src_tokens"].size(0) forward_output = self.model.forward(**self.forward_input) # we expect forward_output to be a dict with the following # key/value pairs: # - encoder_out: a Torch.Tensor # - encoder_padding_mask: a binary Torch.Tensor succ, msg = check_encoder_output(forward_output, batch_size=bsz) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output def test_get_normalized_probs(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) logprob = self.model.get_normalized_probs(forward_output, log_probs=True) prob = self.model.get_normalized_probs(forward_output, log_probs=False) # in order for different models/criterion to play with each other # we need to know whether the logprob or prob output is batch_first # or not. We assume an additional attribute will be attached to logprob # or prob. 
If you find your code failed here, simply override # FairseqModel.get_normalized_probs, see example at # https://fburl.com/batch_first_example self.assertTrue(hasattr(logprob, "batch_first")) self.assertTrue(hasattr(prob, "batch_first")) self.assertTrue(torch.is_tensor(logprob)) self.assertTrue(torch.is_tensor(prob)) class TestFairseqEncoderBase(unittest.TestCase): """ base class to test FairseqEncoder """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpEncoder(self, encoder): self.assertTrue( isinstance(encoder, FairseqEncoder), msg="This class is only used for test FairseqEncoder", ) self.encoder = encoder def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input # get_dummy_input() is originally for s2s, here we delete extra dict # items, so it can be used for EncoderModel / Encoder as well self.forward_input.pop("prev_output_tokens", None) def setUp(self): self.encoder = None self.forward_input = None def test_forward(self): if self.encoder and self.forward_input: bsz = self.forward_input["src_tokens"].size(0) forward_output = self.encoder.forward(**self.forward_input) succ, msg = check_encoder_output(forward_output, batch_size=bsz) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output class TestFairseqDecoderBase(unittest.TestCase): """ base class to test FairseqDecoder """ @classmethod def setUpClass(cls): if cls is TestFairseqDecoderBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpDecoder(self, decoder): self.assertTrue( isinstance(decoder, FairseqDecoder), msg="This class is only used for test FairseqDecoder", ) self.decoder = decoder def setUpInput(self, input=None): self.forward_input = get_dummy_encoder_output() if input is None else input def setUpPrevOutputTokens(self, tokens=None): if tokens is None: self.encoder_input = get_dummy_input() self.prev_output_tokens = self.encoder_input["prev_output_tokens"] else: self.prev_output_tokens = tokens def setUp(self): self.decoder = None self.forward_input = None self.prev_output_tokens = None def test_forward(self): if ( self.decoder is not None and self.forward_input is not None and self.prev_output_tokens is not None ): forward_output = self.decoder.forward( prev_output_tokens=self.prev_output_tokens, encoder_out=self.forward_input, ) succ, msg = check_decoder_output(forward_output) if not succ: self.assertTrue(succ, msg=msg) self.forward_input = forward_output class DummyEncoderModel(FairseqEncoderModel): def __init__(self, encoder): super().__init__(encoder) @classmethod def build_model(cls, args, task): return cls(DummyEncoder()) def get_logits(self, net_output): # Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as # F.binary_cross_entropy_with_logits combines sigmoid and CE return torch.log( torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"]) ) def get_normalized_probs(self, net_output, log_probs, sample=None): lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample) lprobs.batch_first = True return lprobs class DummyEncoder(FairseqEncoder): def __init__(self): super().__init__(None) def forward(self, src_tokens, src_lengths): mask, max_len = lengths_to_encoder_padding_mask(src_lengths) return {"encoder_out": src_tokens, "encoder_padding_mask": mask} class CrossEntropyCriterionTestBase(unittest.TestCase): @classmethod def setUpClass(cls): if cls is 
CrossEntropyCriterionTestBase: raise unittest.SkipTest("Skipping base class test case") super().setUpClass() def setUpArgs(self): args = argparse.Namespace() args.sentence_avg = False args.threshold = 0.1 # to use with BinaryCrossEntropyWithLogitsCriterion return args def setUp(self): args = self.setUpArgs() self.model = DummyEncoderModel(encoder=DummyEncoder()) self.criterion = self.criterion_cls.build_criterion(args, task=DummyTask(args)) def get_src_tokens(self, correct_prediction, aggregate): """ correct_prediction: True if the net_output (src_tokens) should predict the correct target aggregate: True if the criterion expects net_output (src_tokens) aggregated across time axis """ predicted_idx = 0 if correct_prediction else 1 if aggregate: src_tokens = torch.zeros((2, 2), dtype=torch.float) for b in range(2): src_tokens[b][predicted_idx] = 1.0 else: src_tokens = torch.zeros((2, 10, 2), dtype=torch.float) for b in range(2): for t in range(10): src_tokens[b][t][predicted_idx] = 1.0 return src_tokens def get_target(self, soft_target): if soft_target: target = torch.zeros((2, 2), dtype=torch.float) for b in range(2): target[b][0] = 1.0 else: target = torch.zeros((2, 10), dtype=torch.long) return target def get_test_sample(self, correct, soft_target, aggregate): src_tokens = self.get_src_tokens(correct, aggregate) target = self.get_target(soft_target) L = src_tokens.size(1) return { "net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])}, "target": target, "ntokens": src_tokens.size(0) * src_tokens.size(1), }
EXA-1-master
exa/libraries/fairseq/tests/speech_recognition/asr_test_base.py
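The shapes that get_dummy_input above hands to every model test are worth spelling out. A small sketch, assuming it is run from a fairseq checkout so that both tests/ and examples/ are importable; the sizes are arbitrary:

from tests.speech_recognition.asr_test_base import get_dummy_input

batch = get_dummy_input(T=30, D=8, B=4, K=50)
print(batch["src_tokens"].shape)          # torch.Size([4, 30, 8]), i.e. (B, T, D)
print(batch["src_lengths"])               # sorted descending, first entry equals T
print(batch["prev_output_tokens"].shape)  # (B, longest target), padded with index 1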
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import numpy as np import torch from examples.speech_recognition.data.collaters import Seq2SeqCollater class TestSeq2SeqCollator(unittest.TestCase): def test_collate(self): eos_idx = 1 pad_idx = 0 collater = Seq2SeqCollater( feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx ) # 2 frames in the first sample and 3 frames in the second one frames1 = np.array([[7, 8], [9, 10]]) frames2 = np.array([[1, 2], [3, 4], [5, 6]]) target1 = np.array([4, 2, 3, eos_idx]) target2 = np.array([3, 2, eos_idx]) sample1 = {"id": 0, "data": [frames1, target1]} sample2 = {"id": 1, "data": [frames2, target2]} batch = collater.collate([sample1, sample2]) # collate sort inputs by frame's length before creating the batch self.assertTensorEqual(batch["id"], torch.tensor([1, 0])) self.assertEqual(batch["ntokens"], 7) self.assertTensorEqual( batch["net_input"]["src_tokens"], torch.tensor( [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]] ), ) self.assertTensorEqual( batch["net_input"]["prev_output_tokens"], torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]), ) self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2])) self.assertTensorEqual( batch["target"], torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]), ) self.assertEqual(batch["nsentences"], 2) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech_recognition/test_collaters.py
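For reference, the expected ntokens of 7 above is simply the total number of target tokens in the batch (4 in target1 plus 3 in target2), and the batch ids come back as [1, 0] because the collater sorts samples by their number of input frames, so the 3-frame sample is placed first.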
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from examples.speech_recognition.criterions.cross_entropy_acc import ( CrossEntropyWithAccCriterion, ) from .asr_test_base import CrossEntropyCriterionTestBase class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase): def setUp(self): self.criterion_cls = CrossEntropyWithAccCriterion super().setUp() def test_cross_entropy_all_correct(self): sample = self.get_test_sample(correct=True, soft_target=False, aggregate=False) loss, sample_size, logging_output = self.criterion( self.model, sample, "sum", log_probs=True ) assert logging_output["correct"] == 20 assert logging_output["total"] == 20 assert logging_output["sample_size"] == 20 assert logging_output["ntokens"] == 20 def test_cross_entropy_all_wrong(self): sample = self.get_test_sample(correct=False, soft_target=False, aggregate=False) loss, sample_size, logging_output = self.criterion( self.model, sample, "sum", log_probs=True ) assert logging_output["correct"] == 0 assert logging_output["total"] == 20 assert logging_output["sample_size"] == 20 assert logging_output["ntokens"] == 20
EXA-1-master
exa/libraries/fairseq/tests/speech_recognition/test_cross_entropy.py
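The expected counts of 20 in both tests follow from the non-aggregated sample that get_test_sample builds in asr_test_base.py: 2 sentences of 10 frames each, so ntokens, total and sample_size all equal 2 x 10 = 20, and the correct count is either all 20 frames or none of them depending on the correct flag.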
#!/usr/bin/env python3 # import models/encoder/decoder to be tested from examples.speech_recognition.models.vggtransformer import ( TransformerDecoder, VGGTransformerEncoder, VGGTransformerModel, vggtransformer_1, vggtransformer_2, vggtransformer_base, ) # import base test class from .asr_test_base import ( DEFAULT_TEST_VOCAB_SIZE, TestFairseqDecoderBase, TestFairseqEncoderBase, TestFairseqEncoderDecoderModelBase, get_dummy_dictionary, get_dummy_encoder_output, get_dummy_input, ) class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase): def setUp(self): def override_config(args): """ vggtrasformer_1 use 14 layers of transformer, for testing purpose, it is too expensive. For fast turn-around test, reduce the number of layers to 3. """ args.transformer_enc_config = ( "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3" ) super().setUp() extra_args_setter = [vggtransformer_1, override_config] self.setUpModel(VGGTransformerModel, extra_args_setter) self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase): def setUp(self): def override_config(args): """ vggtrasformer_2 use 16 layers of transformer, for testing purpose, it is too expensive. For fast turn-around test, reduce the number of layers to 3. """ args.transformer_enc_config = ( "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3" ) super().setUp() extra_args_setter = [vggtransformer_2, override_config] self.setUpModel(VGGTransformerModel, extra_args_setter) self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase): def setUp(self): def override_config(args): """ vggtrasformer_base use 12 layers of transformer, for testing purpose, it is too expensive. For fast turn-around test, reduce the number of layers to 3. """ args.transformer_enc_config = ( "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3" ) super().setUp() extra_args_setter = [vggtransformer_base, override_config] self.setUpModel(VGGTransformerModel, extra_args_setter) self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) class VGGTransformerEncoderTest(TestFairseqEncoderBase): def setUp(self): super().setUp() self.setUpInput(get_dummy_input(T=50, D=80, B=5)) def test_forward(self): print("1. test standard vggtransformer") self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80)) super().test_forward() print("2. test vggtransformer with limited right context") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(-1, 5) ) ) super().test_forward() print("3. test vggtransformer with limited left context") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(5, -1) ) ) super().test_forward() print("4. test vggtransformer with limited right context and sampling") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(-1, 12), transformer_sampling=(2, 2), ) ) super().test_forward() print("5. 
test vggtransformer with windowed context and sampling") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(12, 12), transformer_sampling=(2, 2), ) ) class TransformerDecoderTest(TestFairseqDecoderBase): def setUp(self): super().setUp() dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE) decoder = TransformerDecoder(dict) dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256)) self.setUpDecoder(decoder) self.setUpInput(dummy_encoder_output) self.setUpPrevOutputTokens()
EXA-1-master
exa/libraries/fairseq/tests/speech_recognition/test_vggtransformer.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from examples.speech_recognition.data import data_utils class DataUtilsTest(unittest.TestCase): def test_normalization(self): sample_len1 = torch.tensor( [ [ -0.7661, -1.3889, -2.0972, -0.9134, -0.7071, -0.9765, -0.8700, -0.8283, 0.7512, 1.3211, 2.1532, 2.1174, 1.2800, 1.2633, 1.6147, 1.6322, 2.0723, 3.1522, 3.2852, 2.2309, 2.5569, 2.2183, 2.2862, 1.5886, 0.8773, 0.8725, 1.2662, 0.9899, 1.1069, 1.3926, 1.2795, 1.1199, 1.1477, 1.2687, 1.3843, 1.1903, 0.8355, 1.1367, 1.2639, 1.4707, ] ] ) out = data_utils.apply_mv_norm(sample_len1) assert not torch.isnan(out).any() assert (out == sample_len1).all()
EXA-1-master
exa/libraries/fairseq/tests/speech_recognition/test_data_utils.py
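test_data_utils.py checks that apply_mv_norm neither produces NaNs nor alters a single-frame input it cannot meaningfully normalize. A plain-torch sketch of per-utterance mean-variance normalization with the usual single-frame guard, assuming a (frames, features) layout; this illustrates the intent only and is not fairseq's implementation:

import torch


def mean_var_norm(feats: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # feats: (T, D) frames; normalize each feature dimension over time.
    if feats.size(0) < 2:
        # With a single frame the variance is undefined, so return the input
        # unchanged, matching the behaviour the test above asserts.
        return feats
    mean = feats.mean(dim=0, keepdim=True)
    std = feats.std(dim=0, keepdim=True)
    return (feats - mean) / (std + eps)


if __name__ == "__main__":
    single = torch.randn(1, 40)
    assert torch.equal(mean_var_norm(single), single)
    many = torch.randn(100, 40)
    assert not torch.isnan(mean_var_norm(many)).any()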
EXA-1-master
exa/libraries/fairseq/tests/speech_recognition/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from tests.speech import TestFairseqSpeech S3_BASE_URL = "https://dl.fbaipublicfiles.com/fairseq/" class TestConvtransformerSimulTrans(TestFairseqSpeech): def setUp(self): self._set_up( "simul", "speech_tests/simul", ["config_gcmvn_specaug.yaml", "dict.txt", "dev.tsv"], ) def test_waitk_checkpoint(self): """Only test model loading since fairseq currently doesn't support inference of simultaneous models""" _, _, _, _ = self.download_and_load_checkpoint( "checkpoint_best.pt", arg_overrides={ "config_yaml": "config_gcmvn_specaug.yaml", "load_pretrained_encoder_from": None, }, ) return if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_convtransformer_simul_trans.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from tqdm import tqdm from fairseq import utils from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion from tests.speech import TestFairseqSpeech @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestFastSpeech2(TestFairseqSpeech): def setUp(self): self.set_up_ljspeech() @torch.no_grad() def test_ljspeech_fastspeech2_checkpoint(self): models, cfg, task, generator = self.download_and_load_checkpoint( "ljspeech_fastspeech2_g2p.pt", arg_overrides={ "config_yaml": "cfg_ljspeech_g2p.yaml", "vocoder": "griffin_lim", "fp16": False, }, ) batch_iterator = self.get_batch_iterator(task, "ljspeech_test", 65_536, 4_096) progress = tqdm(batch_iterator, total=len(batch_iterator)) mcd, n_samples = 0.0, 0 for sample in progress: sample = utils.move_to_cuda(sample) if self.use_cuda else sample hypos = generator.generate(models[0], sample, has_targ=True) rets = batch_mel_cepstral_distortion( [hypo["targ_waveform"] for hypo in hypos], [hypo["waveform"] for hypo in hypos], sr=task.sr, ) mcd += sum(d.item() for d, _ in rets) n_samples += len(sample["id"].tolist()) mcd = round(mcd / n_samples, 1) reference_mcd = 3.2 print(f"MCD: {mcd} (reference: {reference_mcd})") self.assertAlmostEqual(mcd, reference_mcd, delta=0.1) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_fastspeech2.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from tqdm import tqdm from fairseq import utils from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion from tests.speech import TestFairseqSpeech @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestTTSTransformer(TestFairseqSpeech): def setUp(self): self.set_up_ljspeech() @torch.no_grad() def test_ljspeech_tts_transformer_checkpoint(self): models, cfg, task, generator = self.download_and_load_checkpoint( "ljspeech_transformer_g2p.pt", arg_overrides={ "config_yaml": "cfg_ljspeech_g2p.yaml", "vocoder": "griffin_lim", "fp16": False, }, ) batch_iterator = self.get_batch_iterator(task, "ljspeech_test", 65_536, 1024) progress = tqdm(batch_iterator, total=len(batch_iterator)) mcd, n_samples = 0.0, 0 for sample in progress: sample = utils.move_to_cuda(sample) if self.use_cuda else sample hypos = generator.generate(models[0], sample, has_targ=True) rets = batch_mel_cepstral_distortion( [hypo["targ_waveform"] for hypo in hypos], [hypo["waveform"] for hypo in hypos], sr=task.sr, ) mcd += sum(d.item() for d, _ in rets) n_samples += len(sample["id"].tolist()) mcd = round(mcd / n_samples, 1) reference_mcd = 3.3 print(f"MCD: {mcd} (reference: {reference_mcd})") self.assertAlmostEqual(mcd, reference_mcd, delta=0.1) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_tts_transformer.py
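Both TTS tests above reduce the per-utterance results of batch_mel_cepstral_distortion the same way. A small sketch of that running-average bookkeeping with made-up numbers; the (distortion, _) pair structure is taken from the loops above:

# Pretend these are the (distortion, alignment) pairs returned for two batches.
batch_rets = [
    [(3.1, None), (3.4, None), (3.3, None)],
    [(3.2, None), (3.0, None)],
]

mcd, n_samples = 0.0, 0
for rets in batch_rets:
    mcd += sum(d for d, _ in rets)
    n_samples += len(rets)

print(round(mcd / n_samples, 1))  # 3.2, the quantity compared against reference_mcd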
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace import os import re import unittest from pathlib import Path from tqdm import tqdm from typing import List, Dict, Optional import torch from fairseq.checkpoint_utils import load_model_ensemble_and_task from fairseq.scoring.wer import WerScorer from fairseq.scoring.bleu import SacrebleuScorer from fairseq import utils import zipfile S3_BASE_URL = "https://dl.fbaipublicfiles.com/fairseq" class TestFairseqSpeech(unittest.TestCase): @classmethod def download(cls, base_url: str, out_root: Path, filename: str): url = f"{base_url}/{filename}" path = out_root / filename if not path.exists(): torch.hub.download_url_to_file(url, path.as_posix(), progress=True) return path def _set_up(self, dataset_id: str, s3_dir: str, data_filenames: List[str]): self.use_cuda = torch.cuda.is_available() self.root = Path.home() / ".cache" / "fairseq" / dataset_id self.root.mkdir(exist_ok=True, parents=True) os.chdir(self.root) self.base_url = ( s3_dir if re.search("^https:", s3_dir) else f"{S3_BASE_URL}/{s3_dir}" ) for filename in data_filenames: self.download(self.base_url, self.root, filename) def set_up_librispeech(self): self._set_up( "librispeech", "s2t/librispeech", [ "cfg_librispeech.yaml", "spm_librispeech_unigram10000.model", "spm_librispeech_unigram10000.txt", "librispeech_test-other.tsv", "librispeech_test-other.zip", ], ) def set_up_ljspeech(self): self._set_up( "ljspeech", "s2/ljspeech", [ "cfg_ljspeech_g2p.yaml", "ljspeech_g2p_gcmvn_stats.npz", "ljspeech_g2p.txt", "ljspeech_test.tsv", "ljspeech_test.zip", ], ) def set_up_sotasty_es_en(self): self._set_up( "sotasty_es_en", "s2t/big/es-en", [ "cfg_es_en.yaml", "spm_bpe32768_es_en.model", "spm_bpe32768_es_en.txt", "sotasty_es_en_test_ted.tsv", "sotasty_es_en_test_ted.zip", ], ) def set_up_mustc_de_fbank(self): self._set_up( "mustc_de_fbank", "https://dl.fbaipublicfiles.com/joint_speech_text_4_s2t/must_c/en_de", [ "config.yaml", "spm.model", "dict.txt", "src_dict.txt", "tgt_dict.txt", "tst-COMMON.tsv", "tst-COMMON.zip", ], ) def download_and_load_checkpoint( self, checkpoint_filename: str, arg_overrides: Optional[Dict[str, str]] = None, strict: bool = True, ): path = self.download(self.base_url, self.root, checkpoint_filename) _arg_overrides = arg_overrides or {} _arg_overrides["data"] = self.root.as_posix() models, cfg, task = load_model_ensemble_and_task( [path.as_posix()], arg_overrides=_arg_overrides, strict=strict ) if self.use_cuda: for model in models: model.cuda() return models, cfg, task, self.build_generator(task, models, cfg) def build_generator( self, task, models, cfg, ): return task.build_generator(models, cfg) @classmethod def get_batch_iterator(cls, task, test_split, max_tokens, max_positions): task.load_dataset(test_split) return task.get_batch_iterator( dataset=task.dataset(test_split), max_tokens=max_tokens, max_positions=max_positions, num_workers=1, ).next_epoch_itr(shuffle=False) @classmethod def get_wer_scorer( cls, tokenizer="none", lowercase=False, remove_punct=False, char_level=False ): scorer_args = { "wer_tokenizer": tokenizer, "wer_lowercase": lowercase, "wer_remove_punct": remove_punct, "wer_char_level": char_level, } return WerScorer(Namespace(**scorer_args)) @classmethod def get_bleu_scorer(cls, tokenizer="13a", lowercase=False, char_level=False): scorer_args = { "sacrebleu_tokenizer": tokenizer, "sacrebleu_lowercase": 
lowercase, "sacrebleu_char_level": char_level, } return SacrebleuScorer(Namespace(**scorer_args)) @torch.no_grad() def base_test( self, ckpt_name, reference_score, score_delta=0.3, dataset="librispeech_test-other", max_tokens=65_536, max_positions=(4_096, 1_024), arg_overrides=None, strict=True, score_type="wer", ): models, _, task, generator = self.download_and_load_checkpoint( ckpt_name, arg_overrides=arg_overrides, strict=strict ) if not self.use_cuda: return batch_iterator = self.get_batch_iterator( task, dataset, max_tokens, max_positions ) if score_type == "bleu": scorer = self.get_bleu_scorer() elif score_type == "wer": scorer = self.get_wer_scorer() else: raise Exception(f"Unsupported score type {score_type}") progress = tqdm(enumerate(batch_iterator), total=len(batch_iterator)) for batch_idx, sample in progress: sample = utils.move_to_cuda(sample) if self.use_cuda else sample hypo = task.inference_step(generator, models, sample) for i, sample_id in enumerate(sample["id"].tolist()): tgt_str, hypo_str = self.postprocess_tokens( task, sample["target"][i, :], hypo[i][0]["tokens"].int().cpu(), ) if batch_idx == 0 and i < 3: print(f"T-{sample_id} {tgt_str}") print(f"H-{sample_id} {hypo_str}") scorer.add_string(tgt_str, hypo_str) print(scorer.result_string() + f" (reference: {reference_score})") self.assertAlmostEqual(scorer.score(), reference_score, delta=score_delta) def postprocess_tokens(self, task, target, hypo_tokens): tgt_tokens = utils.strip_pad(target, task.tgt_dict.pad()).int().cpu() tgt_str = task.tgt_dict.string(tgt_tokens, "sentencepiece") hypo_str = task.tgt_dict.string(hypo_tokens, "sentencepiece") return tgt_str, hypo_str def unzip_files(self, zip_file_name): zip_file_path = self.root / zip_file_name with zipfile.ZipFile(zip_file_path, "r") as zip_ref: zip_ref.extractall(self.root / zip_file_name.strip(".zip"))
EXA-1-master
exa/libraries/fairseq/tests/speech/__init__.py
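Most of the checkpoint tests that follow are thin wrappers around base_test above. A sketch of the minimal shape of such a test; the dataset id, S3 subdirectory, file names, split name and checkpoint name are placeholders, not real fairseq artifacts:

import unittest

from tests.speech import TestFairseqSpeech


class TestMySpeechModel(TestFairseqSpeech):
    def setUp(self):
        # Downloads the listed files into ~/.cache/fairseq/<dataset_id> once.
        self._set_up(
            "my_dataset",                          # placeholder dataset id
            "s2t/my_dataset",                      # placeholder S3 subdirectory
            ["cfg.yaml", "dict.txt", "test.tsv"],  # placeholder data files
        )

    def test_my_checkpoint(self):
        self.base_test(
            ckpt_name="my_model.pt",               # placeholder checkpoint
            reference_score=10.0,
            dataset="test",                        # split matching the tsv above
            arg_overrides={"config_yaml": "cfg.yaml"},
            score_type="wer",
        )


if __name__ == "__main__":
    unittest.main()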
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from argparse import Namespace from collections import namedtuple from pathlib import Path import torch from tqdm import tqdm import fairseq from fairseq import utils from fairseq.checkpoint_utils import load_model_ensemble_and_task from fairseq.scoring.bleu import SacrebleuScorer from fairseq.tasks import import_tasks from tests.speech import TestFairseqSpeech @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestDualInputS2TTransformer(TestFairseqSpeech): def setUp(self): self.set_up_mustc_de_fbank() def import_user_module(self): user_dir = ( Path(fairseq.__file__).parent.parent / "examples/speech_text_joint_to_text" ) Arg = namedtuple("Arg", ["user_dir"]) arg = Arg(user_dir.__str__()) utils.import_user_module(arg) @torch.no_grad() def test_mustc_de_fbank_dualinput_s2t_transformer_checkpoint(self): self.import_user_module() checkpoint_filename = "checkpoint_ave_10.pt" path = self.download(self.base_url, self.root, checkpoint_filename) models, cfg, task = load_model_ensemble_and_task( [path.as_posix()], arg_overrides={ "data": self.root.as_posix(), "config_yaml": "config.yaml", "load_pretrain_speech_encoder": "", "load_pretrain_text_encoder_last": "", "load_pretrain_decoder": "", "beam": 10, "nbest": 1, "lenpen": 1.0, "load_speech_only": True, }, ) if self.use_cuda: for model in models: model.cuda() generator = task.build_generator(models, cfg) test_split = "tst-COMMON" task.load_dataset(test_split) batch_iterator = task.get_batch_iterator( dataset=task.dataset(test_split), max_tokens=250_000, max_positions=(10_000, 1_024), num_workers=1, ).next_epoch_itr(shuffle=False) tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x scorer_args = { "sacrebleu_tokenizer": "13a", "sacrebleu_lowercase": False, "sacrebleu_char_level": False, } scorer = SacrebleuScorer(Namespace(**scorer_args)) progress = tqdm(enumerate(batch_iterator), total=len(batch_iterator)) for batch_idx, sample in progress: sample = utils.move_to_cuda(sample) if self.use_cuda else sample hypo = task.inference_step(generator, models, sample) for i, sample_id in enumerate(sample["id"].tolist()): tgt_tokens = ( utils.strip_pad(sample["target"][i, :], task.tgt_dict.pad()) .int() .cpu() ) tgt_str = task.tgt_dict.string(tgt_tokens, "sentencepiece") hypo_str = task.tgt_dict.string( hypo[i][0]["tokens"].int().cpu(), "sentencepiece" ) if batch_idx == 0 and i < 3: print(f"T-{sample_id} {tgt_str}") print(f"D-{sample_id} {hypo_str}") scorer.add_string(tgt_str, hypo_str) reference_bleu = 27.3 result = scorer.result_string() print(result + f" (reference: {reference_bleu})") res_bleu = float(result.split()[2]) self.assertAlmostEqual(res_bleu, reference_bleu, delta=0.3) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_dualinput_s2t_transformer.py
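The scoring loop above boils down to feeding (reference, hypothesis) string pairs into fairseq's SacrebleuScorer and reading the corpus score back. A standalone sketch, assuming fairseq and sacrebleu are installed; the sentences are illustrative:

from argparse import Namespace

from fairseq.scoring.bleu import SacrebleuScorer

scorer = SacrebleuScorer(
    Namespace(
        sacrebleu_tokenizer="13a",
        sacrebleu_lowercase=False,
        sacrebleu_char_level=False,
    )
)
# add_string(reference, hypothesis), one pair per evaluated sample.
scorer.add_string("the cat sat on the mat", "the cat sat on the mat")
scorer.add_string("a quick brown fox", "a quick brown dog")
print(scorer.result_string())  # "BLEU = ..." summary line
print(scorer.score())          # corpus-level BLEU as a float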
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from tests.speech import TestFairseqSpeech


class TestS2TConformer(TestFairseqSpeech):
    def setUp(self):
        self.set_up_librispeech()

    def test_librispeech_s2t_conformer_s_checkpoint(self):
        self.base_test(
            ckpt_name="librispeech_conformer_rel_pos_s.pt",
            reference_score=12,
            arg_overrides={"config_yaml": "cfg_librispeech.yaml"},
        )


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_s2t_conformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import namedtuple from pathlib import Path import torch from tqdm import tqdm import fairseq from fairseq import utils from fairseq.checkpoint_utils import load_model_ensemble_and_task from fairseq.scoring.bleu import SacrebleuScorer from fairseq.tasks import import_tasks from tests.speech import S3_BASE_URL, TestFairseqSpeech @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestLibrispeechDualInputWavTransformer(TestFairseqSpeech): def setUp(self): dataset_id = "librispeech_wvtrasnformer" base_url = "https://dl.fbaipublicfiles.com/joint_speech_text_4_s2t/acl2022/librispeech/finetuned" data_filenames = [ "checkpoint_ave_10.pt", "spm.model", "src_dict.txt", "tgt_dict.txt", "config.yaml", ] self._set_up( dataset_id, "s2t", [ "librispeech_flac_test-other.tsv", "librispeech_flac_test-other.zip", ], ) for filename in data_filenames: self.download(base_url, self.root, filename) def import_user_module(self): user_dir = ( Path(fairseq.__file__).parent.parent / "examples/speech_text_joint_to_text" ) Arg = namedtuple("Arg", ["user_dir"]) arg = Arg(user_dir.__str__()) utils.import_user_module(arg) @torch.no_grad() def test_librispeech_dualinput_wav_transformer_checkpoint(self): self.import_user_module() checkpoint_filename = "checkpoint_ave_10.pt" arg_overrides = { "config_yaml": "config.yaml", "load_pretrained_speech_text_encoder": "", "load_pretrained_speech_text_decoder": "", "beam": 10, "nbest": 1, "lenpen": 1.0, "load_speech_only": True, } self.base_test( checkpoint_filename, 4.6, dataset="librispeech_flac_test-other", max_tokens=800000, max_positions=(800000, 1024), arg_overrides=arg_overrides, ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_dual_input_wav_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from tests.speech import TestFairseqSpeech from fairseq import utils S3_BASE_URL = "https://dl.fbaipublicfiles.com/fairseq/" class TestS2STransformer(TestFairseqSpeech): def setUp(self): self._set_up( "s2s", "speech_tests/s2s", [ "dev_shuf200.tsv", "src_feat.zip", "config_specaug_lb.yaml", "vocoder", "vocoder_config.json", ], ) def test_s2s_transformer_checkpoint(self): self.base_test( ckpt_name="s2u_transformer_reduced_fisher.pt", reference_score=38.3, dataset="dev_shuf200", arg_overrides={ "config_yaml": "config_specaug_lb.yaml", "multitask_config_yaml": None, "target_is_code": True, "target_code_size": 100, "eval_inference": False, }, score_type="bleu", strict=False, ) def postprocess_tokens(self, task, target, hypo_tokens): tgt_tokens = utils.strip_pad(target, task.tgt_dict.pad()).int().cpu() tgt_str = task.tgt_dict.string(tgt_tokens) hypo_str = task.tgt_dict.string(hypo_tokens) return tgt_str, hypo_str if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_s2s_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from tests.speech import TestFairseqSpeech from fairseq.data.data_utils import post_process from fairseq import utils from omegaconf import open_dict S3_BASE_URL = "https://dl.fbaipublicfiles.com/fairseq" @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestWav2Vec2(TestFairseqSpeech): def setUp(self): self._set_up( "librispeech_w2v2", "conformer/wav2vec2/librispeech", [ "test_librispeech-other.ltr", "test_librispeech-other.tsv", "test_librispeech-other_small.ltr_100", "test_librispeech-other_small.tsv", "test-other.zip", "dict.ltr.txt", "dict.ltr_100.txt", ], ) self.unzip_files( "test-other.zip", ) def test_transformer_w2v2(self): self.base_test( ckpt_name="transformer_oss_small_100h.pt", reference_score=38, score_delta=1, dataset="test_librispeech-other", max_tokens=1000000, max_positions=(700000, 1000), arg_overrides={ "task": "audio_finetuning", "labels": "ltr", "nbest": 1, "tpu": False, }, strict=False, ) def test_conformer_w2v2(self): self.base_test( ckpt_name="conformer_LS_PT_LS_FT_rope.pt", reference_score=4.5, score_delta=1, dataset="test_librispeech-other_small", max_tokens=1000000, max_positions=(700000, 1000), arg_overrides={ "task": "audio_finetuning", "labels": "ltr_100", "nbest": 1, "tpu": False, }, strict=True, ) def build_generator(self, task, models, cfg): try: from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder except Exception: raise Exception("Cannot run this test without flashlight dependency") with open_dict(cfg): cfg.nbest = 1 return W2lViterbiDecoder(cfg, task.target_dictionary) def postprocess_tokens(self, task, target, hypo_tokens): tgt_tokens = utils.strip_pad(target, task.target_dictionary.pad()).int().cpu() tgt_str = task.target_dictionary.string(tgt_tokens) tgt_str = post_process(tgt_str, "letter") hypo_pieces = task.target_dictionary.string(hypo_tokens) hypo_str = post_process(hypo_pieces, "letter") return tgt_str, hypo_str if __name__ == "__main__": unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_wav2vec2.py
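The wav2vec 2.0 tests above score with WER, base_test's default score type. A matching standalone sketch of the WerScorer configuration that get_wer_scorer builds, assuming fairseq and the editdistance package its WER scorer relies on are installed; the sentences are illustrative:

from argparse import Namespace

from fairseq.scoring.wer import WerScorer

scorer = WerScorer(
    Namespace(
        wer_tokenizer="none",
        wer_lowercase=False,
        wer_remove_punct=False,
        wer_char_level=False,
    )
)
scorer.add_string("hello world again", "hello word again")  # one substitution
print(scorer.result_string())  # aggregate WER over the pairs added so far
print(scorer.score())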
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from tests.speech import TestFairseqSpeech


class TestS2TTransformer(TestFairseqSpeech):
    def setUp(self):
        self.set_up_librispeech()

    def test_librispeech_s2t_transformer_s_checkpoint(self):
        self.base_test(
            ckpt_name="librispeech_transformer_s.pt",
            reference_score=9,
            arg_overrides={"config_yaml": "cfg_librispeech.yaml"},
        )


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_s2t_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from tests.speech import TestFairseqSpeech


class TestXMTransformer(TestFairseqSpeech):
    def setUp(self):
        self.set_up_sotasty_es_en()

    # TODO: investigate the increased BLEU score (30.42 -> 31.74)
    def test_sotasty_es_en_600m_checkpoint(self):
        self.base_test(
            ckpt_name="xm_transformer_600m_es_en_md.pt",
            reference_score=31.74,
            score_delta=0.2,
            max_tokens=3_000_000,
            max_positions=(1_000_000, 1_024),
            dataset="sotasty_es_en_test_ted",
            arg_overrides={"config_yaml": "cfg_es_en.yaml"},
            score_type="bleu",
        )


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/libraries/fairseq/tests/speech/test_xm_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from pathlib import Path from typing import Callable, List, Optional, Union import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, EvalLMConfig, GenerationConfig, InteractiveConfig, OptimizationConfig, EMAConfig, ) from fairseq.dataclass.utils import gen_parser_from_dataclass # this import is for backward compatibility from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa def get_preprocessing_parser(default_task="translation"): parser = get_parser("Preprocessing", default_task) add_preprocess_args(parser) return parser def get_training_parser(default_task="translation"): parser = get_parser("Trainer", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) add_ema_args(parser) return parser def get_generation_parser(interactive=False, default_task="translation"): parser = get_parser("Generation", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_generation_args(parser) add_checkpoint_args(parser) if interactive: add_interactive_args(parser) return parser def get_speech_generation_parser(default_task="text_to_speech"): parser = get_parser("Speech Generation", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_speech_generation_args(parser) return parser def get_interactive_generation_parser(default_task="translation"): return get_generation_parser(interactive=True, default_task=default_task) def get_eval_lm_parser(default_task="language_modeling"): parser = get_parser("Evaluate Language Model", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_eval_lm_args(parser) return parser def get_validation_parser(default_task=None): parser = get_parser("Validation", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser, default_world_size=1) group = parser.add_argument_group("Evaluation") gen_parser_from_dataclass(group, CommonEvalConfig()) return parser def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, ): """ Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values """ if suppress_defaults: # Parse args without any default values. This requires us to parse # twice, once to identify all the necessary task/model args, and a second # time with all defaults set to None. 
args = parse_args_and_arch( parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False, ) suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()}) args = suppressed_parser.parse_args(input_args) return argparse.Namespace( **{k: v for k, v in vars(args).items() if v is not None} ) from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args(input_args) utils.import_user_module(usr_args) if modify_parser is not None: modify_parser(parser) # The parser doesn't know about model/criterion/optimizer-specific args, so # we parse twice. First we parse the model/criterion/optimizer, then we # parse a second time after adding the *-specific arguments. # If input_args is given, we will parse those args instead of sys.argv. args, _ = parser.parse_known_args(input_args) # Add model-specific args to parser. if hasattr(args, "arch"): model_specific_group = parser.add_argument_group( "Model-specific configuration", # Only include attributes which are explicitly given as command-line # arguments or which have default values. argument_default=argparse.SUPPRESS, ) if args.arch in ARCH_MODEL_REGISTRY: ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) elif args.arch in MODEL_REGISTRY: MODEL_REGISTRY[args.arch].add_args(model_specific_group) else: raise RuntimeError() if hasattr(args, "task"): from fairseq.tasks import TASK_REGISTRY TASK_REGISTRY[args.task].add_args(parser) if getattr(args, "use_bmuf", False): # hack to support extra args for block distributed data parallelism from fairseq.optim.bmuf import FairseqBMUF FairseqBMUF.add_args(parser) # Add *-specific args to parser. from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): choice = getattr(args, registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] if hasattr(cls, "add_args"): cls.add_args(parser) elif hasattr(cls, "__dataclass"): gen_parser_from_dataclass(parser, cls.__dataclass()) # Modify the parser a second time, since defaults may have been reset if modify_parser is not None: modify_parser(parser) # Parse a second time. if parse_known: args, extra = parser.parse_known_args(input_args) else: args = parser.parse_args(input_args) extra = None # Post-process args. 
if ( hasattr(args, "batch_size_valid") and args.batch_size_valid is None ) or not hasattr(args, "batch_size_valid"): args.batch_size_valid = args.batch_size if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None: args.max_tokens_valid = args.max_tokens if getattr(args, "memory_efficient_fp16", False): args.fp16 = True if getattr(args, "memory_efficient_bf16", False): args.bf16 = True args.tpu = getattr(args, "tpu", False) args.bf16 = getattr(args, "bf16", False) if args.bf16: args.tpu = True if args.tpu and args.fp16: raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs") if getattr(args, "seed", None) is None: args.seed = 1 # default seed for training args.no_seed_provided = True else: args.no_seed_provided = False if getattr(args, "update_epoch_batch_itr", None) is None: if hasattr(args, "grouped_shuffling"): args.update_epoch_batch_itr = args.grouped_shuffling else: args.grouped_shuffling = False args.update_epoch_batch_itr = False # Apply architecture configuration. if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY: ARCH_CONFIG_REGISTRY[args.arch](args) if parse_known: return args, extra else: return args def get_parser(desc, default_task="translation"): # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args() utils.import_user_module(usr_args) parser = argparse.ArgumentParser(allow_abbrev=False) gen_parser_from_dataclass(parser, CommonConfig()) from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): parser.add_argument( "--" + registry_name.replace("_", "-"), default=REGISTRY["default"], choices=REGISTRY["registry"].keys(), ) # Task definitions can be found under fairseq/tasks/ from fairseq.tasks import TASK_REGISTRY parser.add_argument( "--task", metavar="TASK", default=default_task, choices=TASK_REGISTRY.keys(), help="task", ) # fmt: on return parser def add_preprocess_args(parser): group = parser.add_argument_group("Preprocessing") # fmt: off group.add_argument("-s", "--source-lang", default=None, metavar="SRC", help="source language") group.add_argument("-t", "--target-lang", default=None, metavar="TARGET", help="target language") group.add_argument("--trainpref", metavar="FP", default=None, help="train file prefix (also used to build dictionaries)") group.add_argument("--validpref", metavar="FP", default=None, help="comma separated, valid file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--testpref", metavar="FP", default=None, help="comma separated, test file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--align-suffix", metavar="FP", default=None, help="alignment file suffix") group.add_argument("--destdir", metavar="DIR", default="data-bin", help="destination dir") group.add_argument("--thresholdtgt", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--thresholdsrc", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--tgtdict", metavar="FP", help="reuse given target dictionary") group.add_argument("--srcdict", metavar="FP", help="reuse given source dictionary") group.add_argument("--nwordstgt", metavar="N", default=-1, 
type=int, help="number of target words to retain") group.add_argument("--nwordssrc", metavar="N", default=-1, type=int, help="number of source words to retain") group.add_argument("--alignfile", metavar="ALIGN", default=None, help="an alignment file (optional)") parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument("--joined-dictionary", action="store_true", help="Generate joined dictionary") group.add_argument("--only-source", action="store_true", help="Only process the source language") group.add_argument("--padding-factor", metavar="N", default=8, type=int, help="Pad dictionary size to be multiple of N") group.add_argument("--workers", metavar="N", default=1, type=int, help="number of parallel workers") group.add_argument("--dict-only", action='store_true', help="if true, only builds a dictionary and then exits") # fmt: on return parser def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group("dataset_data_loading") gen_parser_from_dataclass(group, DatasetConfig()) # fmt: on return group def add_distributed_training_args(parser, default_world_size=None): group = parser.add_argument_group("distributed_training") if default_world_size is None: default_world_size = max(1, torch.cuda.device_count()) gen_parser_from_dataclass( group, DistributedTrainingConfig(distributed_world_size=default_world_size) ) return group def add_optimization_args(parser): group = parser.add_argument_group("optimization") # fmt: off gen_parser_from_dataclass(group, OptimizationConfig()) # fmt: on return group def add_checkpoint_args(parser): group = parser.add_argument_group("checkpoint") # fmt: off gen_parser_from_dataclass(group, CheckpointConfig()) # fmt: on return group def add_common_eval_args(group): gen_parser_from_dataclass(group, CommonEvalConfig()) def add_eval_lm_args(parser): group = parser.add_argument_group("LM Evaluation") add_common_eval_args(group) gen_parser_from_dataclass(group, EvalLMConfig()) def add_generation_args(parser): group = parser.add_argument_group("Generation") add_common_eval_args(group) gen_parser_from_dataclass(group, GenerationConfig()) return group def add_speech_generation_args(parser): group = parser.add_argument_group("Speech Generation") add_common_eval_args(group) # NOTE: remove_bpe is not needed # fmt: off group.add_argument('--eos_prob_threshold', default=0.5, type=float, help='terminate when eos probability exceeds this') # fmt: on return group def add_interactive_args(parser): group = parser.add_argument_group("Interactive") gen_parser_from_dataclass(group, InteractiveConfig()) def add_model_args(parser): group = parser.add_argument_group("Model configuration") # fmt: off # Model definitions can be found under fairseq/models/ # # The model architecture can be specified in several ways. 
# In increasing order of priority: # 1) model defaults (lowest priority) # 2) --arch argument # 3) --encoder/decoder-* arguments (highest priority) from fairseq.models import ARCH_MODEL_REGISTRY group.add_argument('--arch', '-a', metavar='ARCH', choices=ARCH_MODEL_REGISTRY.keys(), help='model architecture') # fmt: on return group def get_args( data: Union[str, Path], task: str = "translation", arch: str = "transformer", **overrides ): parser = get_training_parser(task) args = parse_args_and_arch(parser, [str(data), "--task", task, "--arch", arch]) for k, v in overrides.items(): setattr(args, k, v) return args def add_ema_args(parser): group = parser.add_argument_group("EMA configuration") gen_parser_from_dataclass(group, EMAConfig())
EXA-1-master
exa/libraries/fairseq/fairseq/options.py
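A minimal usage sketch for the parser helpers in fairseq/options.py above, assuming an installed fairseq package; the data directory "data-bin/example" and the override values are placeholders, not part of the file.
from fairseq import options

# Build the full training parser (dataset, distributed, model, optimization,
# checkpoint and EMA groups), then resolve architecture-specific defaults.
parser = options.get_training_parser(default_task="translation")
args = options.parse_args_and_arch(
    parser,
    input_args=["data-bin/example", "--arch", "transformer", "--optimizer", "adam"],
)
print(args.arch, args.optimizer, args.lr)

# get_args() wraps the same flow and applies keyword overrides afterwards.
args = options.get_args("data-bin/example", task="translation", arch="transformer", max_tokens=4096)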
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import namedtuple import numpy as np import torch from fairseq import utils DecoderOut = namedtuple( "IterativeRefinementDecoderOut", ["output_tokens", "output_scores", "attn", "step", "max_step", "history"], ) class IterativeRefinementGenerator(object): def __init__( self, tgt_dict, models=None, eos_penalty=0.0, max_iter=10, max_ratio=2, beam_size=1, decoding_format=None, retain_dropout=False, adaptive=True, retain_history=False, reranking=False, ): """ Generates translations based on iterative refinement. Args: tgt_dict: target dictionary eos_penalty: if > 0.0, it penalized early-stopping in decoding max_iter: maximum number of refinement iterations max_ratio: generate sequences of maximum length ax, where x is the source length decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'} retain_dropout: retaining dropout in the inference adaptive: decoding with early stop """ self.bos = tgt_dict.bos() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.eos_penalty = eos_penalty self.max_iter = max_iter self.max_ratio = max_ratio self.beam_size = beam_size self.reranking = reranking self.decoding_format = decoding_format self.retain_dropout = retain_dropout self.retain_history = retain_history self.adaptive = adaptive self.models = models def generate_batched_itr( self, data_itr, maxlen_a=None, maxlen_b=None, cuda=False, timer=None, prefix_size=0, ): """Iterate over a batched dataset and yield individual translations. Args: maxlen_a/b: generate sequences of maximum length ax + b, where x is the source sentence length. cuda: use GPU for generation timer: StopwatchMeter for timing generations. """ for sample in data_itr: if "net_input" not in sample: continue if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate( self.models, sample, prefix_tokens=sample["target"][:, :prefix_size] if prefix_size > 0 else None, ) if timer is not None: timer.stop(sample["ntokens"]) for i, id in enumerate(sample["id"]): # remove padding src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad) ref = utils.strip_pad(sample["target"][i, :], self.pad) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample, prefix_tokens=None, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the IterativeRefinementGenerator is not supported" ) # TODO: iterative refinement generator does not support ensemble for now. if not self.retain_dropout: for model in models: model.eval() model, reranker = models[0], None if self.reranking: assert len(models) > 1, "Assuming the last checkpoint is the reranker" assert ( self.beam_size > 1 ), "Reranking requires multiple translation for each example" reranker = models[-1] models = models[:-1] if len(models) > 1 and hasattr(model, "enable_ensemble"): assert model.allow_ensemble, "{} does not support ensembling".format( model.__class__.__name__ ) model.enable_ensemble(models) # TODO: better encoder inputs? 
src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size() # initialize encoder_out = model.forward_encoder([src_tokens, src_lengths]) prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens) if self.beam_size > 1: assert ( model.allow_length_beam ), "{} does not support decoding with length beam.".format( model.__class__.__name__ ) # regenerate data based on length-beam length_beam_order = ( utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1) ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, length_beam_order ) prev_decoder_out = model.regenerate_length_beam( prev_decoder_out, self.beam_size ) bsz = bsz * self.beam_size sent_idxs = torch.arange(bsz) prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.retain_history: prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens]) finalized = [[] for _ in range(bsz)] def is_a_loop(x, y, s, a): b, l_x, l_y = x.size(0), x.size(1), y.size(1) if l_x > l_y: y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1) s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1) if a is not None: a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1) elif l_x < l_y: x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1) return (x == y).all(1), y, s, a def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn): cutoff = prev_out_token.ne(self.pad) tokens = prev_out_token[cutoff] if prev_out_score is None: scores, score = None, None else: scores = prev_out_score[cutoff] score = scores.mean() if prev_out_attn is None: hypo_attn, alignment = None, None else: hypo_attn = prev_out_attn[cutoff] alignment = hypo_attn.max(dim=1)[1] return { "steps": step, "tokens": tokens, "positional_scores": scores, "score": score, "hypo_attn": hypo_attn, "alignment": alignment, } for step in range(self.max_iter + 1): decoder_options = { "eos_penalty": self.eos_penalty, "max_ratio": self.max_ratio, "decoding_format": self.decoding_format, } prev_decoder_out = prev_decoder_out._replace( step=step, max_step=self.max_iter + 1, ) decoder_out = model.forward_decoder( prev_decoder_out, encoder_out, **decoder_options ) if self.adaptive: # terminate if there is a loop terminated, out_tokens, out_scores, out_attn = is_a_loop( prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn, ) decoder_out = decoder_out._replace( output_tokens=out_tokens, output_scores=out_scores, attn=out_attn, ) else: terminated = decoder_out.output_tokens.new_zeros( decoder_out.output_tokens.size(0) ).bool() if step == self.max_iter: # reach last iteration, terminate terminated.fill_(1) # collect finalized sentences finalized_idxs = sent_idxs[terminated.to(sent_idxs.device)] finalized_tokens = decoder_out.output_tokens[terminated] finalized_scores = decoder_out.output_scores[terminated] finalized_attn = ( None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated] ) if self.retain_history: finalized_history_tokens = [h[terminated] for h in decoder_out.history] for i in range(finalized_idxs.size(0)): finalized[finalized_idxs[i]] = [ finalized_hypos( step, finalized_tokens[i], finalized_scores[i], None if finalized_attn is None else finalized_attn[i], ) ] if self.retain_history: finalized[finalized_idxs[i]][0]["history"] = [] for j in range(len(finalized_history_tokens)): finalized[finalized_idxs[i]][0]["history"].append( finalized_hypos( step, 
finalized_history_tokens[j][i], None, None ) ) # check if all terminated if terminated.sum() == terminated.size(0): break # for next step not_terminated = ~terminated prev_decoder_out = decoder_out._replace( output_tokens=decoder_out.output_tokens[not_terminated], output_scores=decoder_out.output_scores[not_terminated], attn=decoder_out.attn[not_terminated] if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0) else None, history=[h[not_terminated] for h in decoder_out.history] if decoder_out.history is not None else None, ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, not_terminated.nonzero(as_tuple=False).squeeze() ) sent_idxs = sent_idxs[not_terminated.to(sent_idxs.device)] prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.beam_size > 1: if reranker is not None: finalized = self.rerank( reranker, finalized, [src_tokens, src_lengths], self.beam_size ) # aggregate information from length beam finalized = [ finalized[ np.argmax( [ finalized[self.beam_size * i + j][0]["score"] for j in range(self.beam_size) ] ) + self.beam_size * i ] for i in range(len(finalized) // self.beam_size) ] return finalized def rerank(self, reranker, finalized, encoder_input, beam_size): def rebuild_batch(finalized): finalized_tokens = [f[0]["tokens"] for f in finalized] finalized_maxlen = max(f.size(0) for f in finalized_tokens) final_output_tokens = ( finalized_tokens[0] .new_zeros(len(finalized_tokens), finalized_maxlen) .fill_(self.pad) ) for i, f in enumerate(finalized_tokens): final_output_tokens[i, : f.size(0)] = f return final_output_tokens final_output_tokens = rebuild_batch(finalized) final_output_tokens[ :, 0 ] = self.eos # autoregressive model assumes starting with EOS reranker_encoder_out = reranker.encoder(*encoder_input) length_beam_order = ( utils.new_arange( final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1) ) .t() .reshape(-1) ) reranker_encoder_out = reranker.encoder.reorder_encoder_out( reranker_encoder_out, length_beam_order ) reranking_scores = reranker.get_normalized_probs( reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None, ) reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None]) reranking_masks = final_output_tokens[:, 1:].ne(self.pad) reranking_scores = ( reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1) ) reranking_scores = reranking_scores / reranking_masks.sum(1).type_as( reranking_scores ) for i in range(len(finalized)): finalized[i][0]["score"] = reranking_scores[i] return finalized
EXA-1-master
exa/libraries/fairseq/fairseq/iterative_refinement_generator.py
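A hedged sketch of wiring IterativeRefinementGenerator (defined above) to a trained non-autoregressive checkpoint; the checkpoint path is a placeholder and the exact loading details depend on the setup.
from fairseq import checkpoint_utils
from fairseq.iterative_refinement_generator import IterativeRefinementGenerator

models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    ["checkpoints/nat_checkpoint.pt"]  # placeholder path
)
generator = IterativeRefinementGenerator(
    task.target_dictionary,
    models=models,
    max_iter=10,     # refinement iterations after the initial decode
    beam_size=1,     # >1 requires a model that supports length-beam decoding
    adaptive=True,   # stop a sentence early once its output stops changing
)
# hypos = generator.generate(models, sample)  # sample: a batch dict with "net_input"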
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch logger = logging.getLogger(__name__) class NanDetector: """ Detects the first NaN or Inf in forward and/or backward pass and logs, together with the module name """ def __init__(self, model, forward=True, backward=True): self.bhooks = [] self.fhooks = [] self.forward = forward self.backward = backward self.named_parameters = list(model.named_parameters()) self.reset() for name, mod in model.named_modules(): mod.__module_name = name self.add_hooks(mod) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): # Dump out all model gnorms to enable better debugging norm = {} gradients = {} for name, param in self.named_parameters: if param.grad is not None: grad_norm = torch.norm(param.grad.data.float(), p=2) norm[name] = param.norm().item() if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any(): gradients[name] = param.grad.data if len(gradients) > 0: logger.info("Detected nan/inf grad norm, dumping norms...") logger.info(f"norms: {norm}") logger.info(f"gradients: {gradients}") self.close() def add_hooks(self, module): if self.forward: self.fhooks.append(module.register_forward_hook(self.fhook_fn)) if self.backward: self.bhooks.append(module.register_backward_hook(self.bhook_fn)) def reset(self): self.has_printed_f = False self.has_printed_b = False def _detect(self, tensor, name, backward): err = None if ( torch.is_floating_point(tensor) # single value tensors (like the loss) will not provide much info and tensor.numel() >= 2 ): with torch.no_grad(): if torch.isnan(tensor).any(): err = "NaN" elif torch.isinf(tensor).any(): err = "Inf" if err is not None: err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}" return err def _apply(self, module, inp, x, backward): if torch.is_tensor(x): if isinstance(inp, tuple) and len(inp) > 0: inp = inp[0] err = self._detect(x, module.__module_name, backward) if err is not None: if torch.is_tensor(inp) and not backward: err += ( f" input max: {inp.max().item()}, input min: {inp.min().item()}" ) has_printed_attr = "has_printed_b" if backward else "has_printed_f" logger.warning(err) setattr(self, has_printed_attr, True) elif isinstance(x, dict): for v in x.values(): self._apply(module, inp, v, backward) elif isinstance(x, list) or isinstance(x, tuple): for v in x: self._apply(module, inp, v, backward) def fhook_fn(self, module, inp, output): if not self.has_printed_f: self._apply(module, inp, output, backward=False) def bhook_fn(self, module, inp, output): if not self.has_printed_b: self._apply(module, inp, output, backward=True) def close(self): for hook in self.fhooks + self.bhooks: hook.remove()
EXA-1-master
exa/libraries/fairseq/fairseq/nan_detector.py
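A minimal sketch of using NanDetector (above) as a context manager around one training step; the toy model and input are illustrative only.
import torch
import torch.nn as nn
from fairseq.nan_detector import NanDetector

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 1))
inputs = torch.randn(4, 8)

# Forward/backward hooks log the first module whose output or gradient
# contains NaN/Inf; on exit, offending gradient norms are dumped.
with NanDetector(model, forward=True, backward=True):
    loss = model(inputs).sum()
    loss.backward()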
# Originally from Microsoft Corporation. # Licensed under the MIT License. """ Wrapper for ngram_repeat_block cuda extension """ import math import warnings from typing import List import torch from torch import nn try: from fairseq import ngram_repeat_block_cuda EXTENSION_BUILT = True except ImportError: EXTENSION_BUILT = False def is_cuda_extension_usable() -> bool: """Check whether ngram_repeat_block_cuda is built properly""" if not EXTENSION_BUILT or not torch.cuda.is_available(): return False bsz = 2 tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda") lprobs = torch.rand((8, 12), device="cuda") try: outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3) outputs = outputs + 4 # This line breaks if the extension is built incorrectly. return True except RuntimeError: warnings.warn( "NGramRepeatBlock extension must be rebuilt." 'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace' ) return False class NGramRepeatBlock(nn.Module): """Wrapper class for calling ngram_repeat_block cuda extension""" def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True): super().__init__() self.use_extension = is_cuda_extension_usable() if use_extension else False self.no_repeat_ngram_size = no_repeat_ngram_size def reset_parameters(self): pass @torch.jit.unused def call_cuda_extension( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): return ngram_repeat_block_cuda.forward( tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size ) def forward( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): """ Args: tokens(Tensor): Input tokens(Bsz*beam, seq_len) lprobs(Tensor): likelihood probability, Expected to be updated in place.(Bsz*beam, vocab_size) bsz(int): batch size step(int): current step beam_size(int): beam size no_repeat_ngram_size(int): Ngram size """ msg = f"expected {bsz *beam_size} got" assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}" assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}" if self.use_extension: return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step) else: return self._no_repeat_ngram( tokens, lprobs, bsz, beam_size, step, ) def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int): """For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf""" banned_tokens = [ torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size) ] if step + 2 - self.no_repeat_ngram_size >= 0: cpu_tokens: List[List[int]] = tokens.cpu().tolist() check_start_pos = step + 2 - self.no_repeat_ngram_size for bbsz_idx in range(bsz * beam_size): ngram_to_check = cpu_tokens[bbsz_idx][ -(self.no_repeat_ngram_size - 1) : ] for i in range(check_start_pos): if ( ngram_to_check == cpu_tokens[bbsz_idx][i : i + self.no_repeat_ngram_size - 1] ): banned_tokens[bbsz_idx].append( cpu_tokens[bbsz_idx][i + self.no_repeat_ngram_size - 1] ) for bbsz_idx in range(bsz * beam_size): lprobs[bbsz_idx][ torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64) ] = torch.tensor(-math.inf).to(lprobs) return lprobs
EXA-1-master
exa/libraries/fairseq/fairseq/ngram_repeat_block.py
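A small sketch of calling NGramRepeatBlock (above) on a batch of beam hypotheses; the shapes are illustrative, and use_extension=False forces the pure-PyTorch fallback so no CUDA build is required.
import torch
from fairseq.ngram_repeat_block import NGramRepeatBlock

bsz, beam_size, vocab_size, step = 2, 2, 10, 3
tokens = torch.randint(0, vocab_size, (bsz * beam_size, step + 1))  # (bsz*beam, prefix_len)
lprobs = torch.randn(bsz * beam_size, vocab_size)                   # (bsz*beam, vocab)

blocker = NGramRepeatBlock(no_repeat_ngram_size=2, use_extension=False)
lprobs = blocker(tokens, lprobs, bsz, beam_size, step)  # banned continuations set to -inf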
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace from typing import Union from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import merge_with_parent from hydra.core.config_store import ConfigStore from omegaconf import DictConfig REGISTRIES = {} def setup_registry(registry_name: str, base_class=None, default=None, required=False): assert registry_name.startswith("--") registry_name = registry_name[2:].replace("-", "_") REGISTRY = {} REGISTRY_CLASS_NAMES = set() DATACLASS_REGISTRY = {} # maintain a registry of all registries if registry_name in REGISTRIES: return # registry already exists REGISTRIES[registry_name] = { "registry": REGISTRY, "default": default, "dataclass_registry": DATACLASS_REGISTRY, } def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs): if isinstance(cfg, DictConfig): choice = cfg._name if choice and choice in DATACLASS_REGISTRY: from_checkpoint = extra_kwargs.get("from_checkpoint", False) dc = DATACLASS_REGISTRY[choice] cfg = merge_with_parent(dc(), cfg, remove_missing=from_checkpoint) elif isinstance(cfg, str): choice = cfg if choice in DATACLASS_REGISTRY: cfg = DATACLASS_REGISTRY[choice]() else: choice = getattr(cfg, registry_name, None) if choice in DATACLASS_REGISTRY: cfg = DATACLASS_REGISTRY[choice].from_namespace(cfg) if choice is None: if required: raise ValueError("{} is required!".format(registry_name)) return None cls = REGISTRY[choice] if hasattr(cls, "build_" + registry_name): builder = getattr(cls, "build_" + registry_name) else: builder = cls if "from_checkpoint" in extra_kwargs: del extra_kwargs["from_checkpoint"] return builder(cfg, *extra_args, **extra_kwargs) def register_x(name, dataclass=None): def register_x_cls(cls): if name in REGISTRY: raise ValueError( "Cannot register duplicate {} ({})".format(registry_name, name) ) if cls.__name__ in REGISTRY_CLASS_NAMES: raise ValueError( "Cannot register {} with duplicate class name ({})".format( registry_name, cls.__name__ ) ) if base_class is not None and not issubclass(cls, base_class): raise ValueError( "{} must extend {}".format(cls.__name__, base_class.__name__) ) if dataclass is not None and not issubclass(dataclass, FairseqDataclass): raise ValueError( "Dataclass {} must extend FairseqDataclass".format(dataclass) ) cls.__dataclass = dataclass if cls.__dataclass is not None: DATACLASS_REGISTRY[name] = cls.__dataclass cs = ConfigStore.instance() node = dataclass() node._name = name cs.store(name=name, group=registry_name, node=node, provider="fairseq") REGISTRY[name] = cls return cls return register_x_cls return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
EXA-1-master
exa/libraries/fairseq/fairseq/registry.py
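An illustrative sketch of setup_registry() from the file above; the "--demo-tokenizer" registry name and the WhitespaceTokenizer class are invented for this example and are not part of fairseq.
from fairseq.registry import setup_registry

# Returns build/register helpers plus the backing registries.
build_demo_tokenizer, register_demo_tokenizer, DEMO_REGISTRY, _ = setup_registry(
    "--demo-tokenizer", default=None
)

@register_demo_tokenizer("whitespace")
class WhitespaceTokenizer:
    def __init__(self, cfg):
        self.cfg = cfg

    def encode(self, line):
        return line.split()

tokenizer = build_demo_tokenizer("whitespace")  # a plain string choice also works
print(tokenizer.encode("hello world"))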
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig class SpeechGenerator(object): def __init__(self, model, vocoder, data_cfg: S2TDataConfig): self.model = model self.vocoder = vocoder stats_npz_path = data_cfg.global_cmvn_stats_npz self.gcmvn_stats = None if stats_npz_path is not None: self.gcmvn_stats = np.load(stats_npz_path) def gcmvn_denormalize(self, x): # x: B x T x C if self.gcmvn_stats is None: return x mean = torch.from_numpy(self.gcmvn_stats["mean"]).to(x) std = torch.from_numpy(self.gcmvn_stats["std"]).to(x) assert len(x.shape) == 3 and mean.shape[0] == std.shape[0] == x.shape[2] x = x * std.view(1, 1, -1).expand_as(x) return x + mean.view(1, 1, -1).expand_as(x) def get_waveform(self, feat): # T x C -> T return None if self.vocoder is None else self.vocoder(feat).squeeze(0) class AutoRegressiveSpeechGenerator(SpeechGenerator): def __init__( self, model, vocoder, data_cfg, max_iter: int = 6000, eos_prob_threshold: float = 0.5, ): super().__init__(model, vocoder, data_cfg) self.max_iter = max_iter self.eos_prob_threshold = eos_prob_threshold @torch.no_grad() def generate(self, model, sample, has_targ=False, **kwargs): model.eval() src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size()[:2] n_frames_per_step = model.decoder.n_frames_per_step out_dim = model.decoder.out_dim raw_dim = out_dim // n_frames_per_step # initialize encoder_out = model.forward_encoder( src_tokens, src_lengths, speaker=sample["speaker"] ) incremental_state = {} feat, attn, eos_prob = [], [], [] finished = src_tokens.new_zeros((bsz,)).bool() out_lens = src_lengths.new_zeros((bsz,)).long().fill_(self.max_iter) prev_feat_out = encoder_out["encoder_out"][0].new_zeros(bsz, 1, out_dim) for step in range(self.max_iter): cur_out_lens = out_lens.clone() cur_out_lens.masked_fill_(cur_out_lens.eq(self.max_iter), step + 1) _, cur_eos_out, cur_extra = model.forward_decoder( prev_feat_out, encoder_out=encoder_out, incremental_state=incremental_state, target_lengths=cur_out_lens, speaker=sample["speaker"], **kwargs, ) cur_eos_prob = torch.sigmoid(cur_eos_out).squeeze(2) feat.append(cur_extra["feature_out"]) attn.append(cur_extra["attn"]) eos_prob.append(cur_eos_prob) cur_finished = cur_eos_prob.squeeze(1) > self.eos_prob_threshold out_lens.masked_fill_((~finished) & cur_finished, step + 1) finished = finished | cur_finished if finished.sum().item() == bsz: break prev_feat_out = cur_extra["feature_out"] feat = torch.cat(feat, dim=1) feat = model.decoder.postnet(feat) + feat eos_prob = torch.cat(eos_prob, dim=1) attn = torch.cat(attn, dim=2) alignment = attn.max(dim=1)[1] feat = feat.reshape(bsz, -1, raw_dim) feat = self.gcmvn_denormalize(feat) eos_prob = eos_prob.repeat_interleave(n_frames_per_step, dim=1) attn = attn.repeat_interleave(n_frames_per_step, dim=2) alignment = alignment.repeat_interleave(n_frames_per_step, dim=1) out_lens = out_lens * n_frames_per_step finalized = [ { "feature": feat[b, :out_len], "eos_prob": eos_prob[b, :out_len], "attn": attn[b, :, :out_len], "alignment": alignment[b, :out_len], "waveform": self.get_waveform(feat[b, :out_len]), } for b, out_len in zip(range(bsz), out_lens) ] if has_targ: assert sample["target"].size(-1) == out_dim tgt_feats = sample["target"].view(bsz, -1, raw_dim) tgt_feats = 
self.gcmvn_denormalize(tgt_feats) tgt_lens = sample["target_lengths"] * n_frames_per_step for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)): finalized[b]["targ_feature"] = f[:l] finalized[b]["targ_waveform"] = self.get_waveform(f[:l]) return finalized class MultiDecoderSpeechGenerator(SpeechGenerator): def __init__( self, models, args, vocoder, data_cfg, tgt_dict_mt, max_iter: int = 6000, eos_prob_threshold: float = 0.5, eos_mt=None, symbols_to_strip_from_output=None, ): super().__init__(models[0], vocoder, data_cfg) self.max_iter = max_iter self.eos_prob_threshold = eos_prob_threshold self.tgt_dict_mt = tgt_dict_mt self.eos_mt = eos_mt from examples.speech_to_speech.unity.sequence_generator import SequenceGenerator from fairseq import search self.text_generator = SequenceGenerator( models, tgt_dict_mt, beam_size=max(1, getattr(args, "beam", 5)), max_len_a=getattr(args, "max_len_a", 0), max_len_b=getattr(args, "max_len_b", 200), min_len=getattr(args, "min_len", 1), normalize_scores=(not getattr(args, "unnormalized", False)), len_penalty=getattr(args, "lenpen", 1), unk_penalty=getattr(args, "unkpen", 0), temperature=getattr(args, "temperature", 1.0), match_source_len=getattr(args, "match_source_len", False), no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0), search_strategy=search.BeamSearch(tgt_dict_mt), eos=eos_mt, symbols_to_strip_from_output=symbols_to_strip_from_output, ) @torch.no_grad() def generate(self, model, sample, has_targ=False, **kwargs): model.eval() src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size()[:2] n_frames_per_step = model.decoder.n_frames_per_step out_dim = model.decoder.out_dim raw_dim = out_dim // n_frames_per_step # initialize encoder_out = model.forward_encoder( src_tokens, src_lengths, speaker=sample["speaker"] ) prefix_tokens = None constraints = None bos_token = None mt_decoder = getattr(model, f"{model.mt_task_name}_decoder") # 1. MT decoder finalized_mt = self.text_generator.generate_decoder( [encoder_out], src_tokens, src_lengths, sample, prefix_tokens, constraints, bos_token, aux_task_name=model.mt_task_name, ) # extract decoder output corresponding to the best hypothesis max_tgt_len = max([len(hypo[0]["tokens"]) for hypo in finalized_mt]) prev_output_tokens_mt = ( src_tokens.new_zeros(src_tokens.shape[0], max_tgt_len) .fill_(mt_decoder.padding_idx) .int() ) # B x T for i, hypo in enumerate(finalized_mt): i_beam = 0 tmp = hypo[i_beam]["tokens"].int() # hyp + eos prev_output_tokens_mt[i, 0] = self.text_generator.eos if tmp[-1] == self.text_generator.eos: tmp = tmp[:-1] prev_output_tokens_mt[i, 1 : len(tmp) + 1] = tmp text = "".join([self.tgt_dict_mt[c] for c in tmp]) text = text.replace("_", " ") text = text.replace("▁", " ") text = text.replace("<unk>", " ") text = text.replace("<s>", "") text = text.replace("</s>", "") if len(text) > 0 and text[0] == " ": text = text[1:] sample_id = sample["id"].tolist()[i] print("{} (None-{})".format(text, sample_id)) mt_decoder_out = mt_decoder( prev_output_tokens_mt, encoder_out=encoder_out, features_only=True, ) x = mt_decoder_out[0].transpose(0, 1) mt_decoder_padding_mask = None if prev_output_tokens_mt.eq(mt_decoder.padding_idx).any(): mt_decoder_padding_mask = prev_output_tokens_mt.eq(mt_decoder.padding_idx) # 2. 
TTS encoder if getattr(model, "synthesizer_encoder", None) is not None: synthesizer_encoder_out = model.synthesizer_encoder( x, mt_decoder_padding_mask, ) else: synthesizer_encoder_out = { "encoder_out": [x], # T x B x C "encoder_padding_mask": [mt_decoder_padding_mask] if mt_decoder_padding_mask is not None else [], # B x T "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } # 3. TTS decoder incremental_state = {} feat, attn, eos_prob = [], [], [] finished = src_tokens.new_zeros((bsz,)).bool() out_lens = src_lengths.new_zeros((bsz,)).long().fill_(self.max_iter) prev_feat_out = encoder_out["encoder_out"][0].new_zeros(bsz, 1, out_dim) for step in range(self.max_iter): cur_out_lens = out_lens.clone() cur_out_lens.masked_fill_(cur_out_lens.eq(self.max_iter), step + 1) _, cur_eos_out, cur_extra = model.forward_decoder( prev_feat_out, encoder_out=synthesizer_encoder_out, incremental_state=incremental_state, target_lengths=cur_out_lens, speaker=sample["speaker"], **kwargs, ) cur_eos_prob = torch.sigmoid(cur_eos_out).squeeze(2) feat.append(cur_extra["feature_out"]) attn.append(cur_extra["attn"]) eos_prob.append(cur_eos_prob) cur_finished = cur_eos_prob.squeeze(1) > self.eos_prob_threshold out_lens.masked_fill_((~finished) & cur_finished, step + 1) finished = finished | cur_finished if finished.sum().item() == bsz: break prev_feat_out = cur_extra["feature_out"] feat = torch.cat(feat, dim=1) feat = model.decoder.postnet(feat) + feat eos_prob = torch.cat(eos_prob, dim=1) attn = torch.cat(attn, dim=2) alignment = attn.max(dim=1)[1] feat = feat.reshape(bsz, -1, raw_dim) feat = self.gcmvn_denormalize(feat) eos_prob = eos_prob.repeat_interleave(n_frames_per_step, dim=1) attn = attn.repeat_interleave(n_frames_per_step, dim=2) alignment = alignment.repeat_interleave(n_frames_per_step, dim=1) out_lens = out_lens * n_frames_per_step finalized = [ { "feature": feat[b, :out_len], "eos_prob": eos_prob[b, :out_len], "attn": attn[b, :, :out_len], "alignment": alignment[b, :out_len], "waveform": self.get_waveform(feat[b, :out_len]), } for b, out_len in zip(range(bsz), out_lens) ] if has_targ: assert sample["target"].size(-1) == out_dim tgt_feats = sample["target"].view(bsz, -1, raw_dim) tgt_feats = self.gcmvn_denormalize(tgt_feats) tgt_lens = sample["target_lengths"] * n_frames_per_step for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)): finalized[b]["targ_feature"] = f[:l] finalized[b]["targ_waveform"] = self.get_waveform(f[:l]) return finalized class NonAutoregressiveSpeechGenerator(SpeechGenerator): @torch.no_grad() def generate(self, model, sample, has_targ=False, **kwargs): model.eval() bsz, max_src_len = sample["net_input"]["src_tokens"].size() n_frames_per_step = model.encoder.n_frames_per_step out_dim = model.encoder.out_dim raw_dim = out_dim // n_frames_per_step feat, feat_post, out_lens, log_dur_out, _, _ = model( src_tokens=sample["net_input"]["src_tokens"], src_lengths=sample["net_input"]["src_lengths"], prev_output_tokens=sample["net_input"]["prev_output_tokens"], incremental_state=None, target_lengths=sample["target_lengths"], speaker=sample["speaker"], ) if feat_post is not None: feat = feat_post feat = feat.view(bsz, -1, raw_dim) feat = self.gcmvn_denormalize(feat) dur_out = torch.clamp(torch.round(torch.exp(log_dur_out) - 1).long(), min=0) def get_dur_plot_data(d): r = [] for i, dd in enumerate(d): r += [i + 1] * dd.item() return r out_lens = out_lens * n_frames_per_step finalized = [ { "feature": feat[b, :l] if l > 0 else feat.new_zeros([1, raw_dim]), 
"waveform": self.get_waveform( feat[b, :l] if l > 0 else feat.new_zeros([1, raw_dim]) ), "attn": feat.new_tensor(get_dur_plot_data(dur_out[b])), } for b, l in zip(range(bsz), out_lens) ] if has_targ: tgt_feats = sample["target"].view(bsz, -1, raw_dim) tgt_feats = self.gcmvn_denormalize(tgt_feats) tgt_lens = sample["target_lengths"] * n_frames_per_step for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)): finalized[b]["targ_feature"] = f[:l] finalized[b]["targ_waveform"] = self.get_waveform(f[:l]) return finalized class TeacherForcingAutoRegressiveSpeechGenerator(AutoRegressiveSpeechGenerator): @torch.no_grad() def generate(self, model, sample, has_targ=False, **kwargs): model.eval() src_tokens = sample["net_input"]["src_tokens"] src_lens = sample["net_input"]["src_lengths"] prev_out_tokens = sample["net_input"]["prev_output_tokens"] tgt_lens = sample["target_lengths"] n_frames_per_step = model.decoder.n_frames_per_step raw_dim = model.decoder.out_dim // n_frames_per_step bsz = src_tokens.shape[0] feat, eos_prob, extra = model( src_tokens, src_lens, prev_out_tokens, incremental_state=None, target_lengths=tgt_lens, speaker=sample["speaker"], ) attn = extra["attn"] # B x T_s x T_t alignment = attn.max(dim=1)[1] feat = feat.reshape(bsz, -1, raw_dim) feat = self.gcmvn_denormalize(feat) eos_prob = eos_prob.repeat_interleave(n_frames_per_step, dim=1) attn = attn.repeat_interleave(n_frames_per_step, dim=2) alignment = alignment.repeat_interleave(n_frames_per_step, dim=1) tgt_lens = sample["target_lengths"] * n_frames_per_step finalized = [ { "feature": feat[b, :tgt_len], "eos_prob": eos_prob[b, :tgt_len], "attn": attn[b, :, :tgt_len], "alignment": alignment[b, :tgt_len], "waveform": self.get_waveform(feat[b, :tgt_len]), } for b, tgt_len in zip(range(bsz), tgt_lens) ] if has_targ: tgt_feats = sample["target"].view(bsz, -1, raw_dim) tgt_feats = self.gcmvn_denormalize(tgt_feats) for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)): finalized[b]["targ_feature"] = f[:l] finalized[b]["targ_waveform"] = self.get_waveform(f[:l]) return finalized
EXA-1-master
exa/libraries/fairseq/fairseq/speech_generator.py
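A hedged sketch of constructing AutoRegressiveSpeechGenerator (above) for a text-to-speech checkpoint; the checkpoint path is a placeholder, the vocoder may be None (get_waveform then returns None), and the task is assumed to expose an S2TDataConfig as data_cfg.
from fairseq import checkpoint_utils
from fairseq.speech_generator import AutoRegressiveSpeechGenerator

models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    ["checkpoints/tts_transformer.pt"]  # placeholder path
)
generator = AutoRegressiveSpeechGenerator(
    models[0],
    vocoder=None,             # or a vocoder module mapping features to a waveform
    data_cfg=task.data_cfg,   # carries the global CMVN stats used for denormalization
    max_iter=6000,
    eos_prob_threshold=0.5,
)
# finalized = generator.generate(models[0], sample)  # sample: a batch with "net_input" and "speaker"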
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import typing as tp def _safe_readline(fd) -> str: pos = fd.tell() while True: try: return fd.readline() except UnicodeDecodeError: pos -= 1 fd.seek(pos) # search where this character begins def find_offsets(filename: str, num_chunks: int) -> tp.List[int]: """ given a file and a number of chuncks, find the offsets in the file to be able to chunk around full lines. """ with open(filename, "r", encoding="utf-8") as f: size = os.fstat(f.fileno()).st_size chunk_size = size // num_chunks offsets = [0 for _ in range(num_chunks + 1)] for i in range(1, num_chunks): f.seek(chunk_size * i) _safe_readline(f) offsets[i] = f.tell() offsets[-1] = size return offsets class ChunkLineIterator: """ Iterator to properly iterate over lines of a file chunck. """ def __init__(self, fd, start_offset: int, end_offset: int): self._fd = fd self._start_offset = start_offset self._end_offset = end_offset def __iter__(self) -> tp.Iterable[str]: self._fd.seek(self._start_offset) # next(f) breaks f.tell(), hence readline() must be used line = _safe_readline(self._fd) while line: pos = self._fd.tell() # f.tell() does not always give the byte position in the file # sometimes it skips to a very large number # it is unlikely that through a normal read we go from # end bytes to end + 2**32 bytes (4 GB) and this makes it unlikely # that the procedure breaks by the undeterministic behavior of # f.tell() if ( self._end_offset > 0 and pos > self._end_offset and pos < self._end_offset + 2**32 ): break yield line line = self._fd.readline() class Chunker: """ contextmanager to read a chunck of a file line by line. """ def __init__(self, path: str, start_offset: int, end_offset: int): self.path = path self.start_offset = start_offset self.end_offset = end_offset def __enter__(self) -> ChunkLineIterator: self.fd = open(self.path, "r", encoding="utf-8") return ChunkLineIterator(self.fd, self.start_offset, self.end_offset) def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.fd.close()
EXA-1-master
exa/libraries/fairseq/fairseq/file_chunker_utils.py
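A small sketch of using find_offsets together with Chunker (above) to walk a text file in byte ranges, e.g. for multi-worker preprocessing; "corpus.txt" is a placeholder path.
from fairseq.file_chunker_utils import Chunker, find_offsets

path, num_chunks = "corpus.txt", 4
offsets = find_offsets(path, num_chunks)  # num_chunks + 1 byte boundaries

total_lines = 0
for start, end in zip(offsets, offsets[1:]):
    with Chunker(path, start, end) as lines:
        total_lines += sum(1 for _ in lines)
print(total_lines)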
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import os import sys try: from .version import __version__ # noqa except ImportError: version_txt = os.path.join(os.path.dirname(__file__), "version.txt") with open(version_txt) as f: __version__ = f.read().strip() __all__ = ["pdb"] # backwards compatibility to support `from fairseq.X import Y` from fairseq.distributed import utils as distributed_utils from fairseq.logging import meters, metrics, progress_bar # noqa sys.modules["fairseq.distributed_utils"] = distributed_utils sys.modules["fairseq.meters"] = meters sys.modules["fairseq.metrics"] = metrics sys.modules["fairseq.progress_bar"] = progress_bar # initialize hydra from fairseq.dataclass.initialize import hydra_init hydra_init() import fairseq.criterions # noqa import fairseq.distributed # noqa import fairseq.models # noqa import fairseq.modules # noqa import fairseq.optim # noqa import fairseq.optim.lr_scheduler # noqa import fairseq.pdb # noqa import fairseq.scoring # noqa import fairseq.tasks # noqa import fairseq.token_generation_constraints # noqa import fairseq.benchmark # noqa import fairseq.model_parallel # noqa
EXA-1-master
exa/libraries/fairseq/fairseq/__init__.py
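A short sketch showing the backward-compatibility aliases registered in the __init__ above: after importing fairseq, the old flat module path and the new subpackage path resolve to the same module object.
import fairseq
import fairseq.distributed_utils as old_dist_utils      # alias set via sys.modules above
from fairseq.distributed import utils as new_dist_utils

assert old_dist_utils is new_dist_utils
print(fairseq.__version__)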
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import sys from typing import Dict, List, Optional import torch import torch.nn as nn from torch import Tensor from fairseq import search, utils from fairseq.data import data_utils from fairseq.models import FairseqIncrementalDecoder from fairseq.ngram_repeat_block import NGramRepeatBlock class SequenceGenerator(nn.Module): def __init__( self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, max_len=0, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None, symbols_to_strip_from_output=None, lm_model=None, lm_weight=1.0, tokens_to_suppress=(), ): """Generates translations of a given source sentence. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models, currently support fairseq.models.TransformerModel for scripting beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length max_len (int, optional): the maximum length of the generated output (not including end-of-sentence) min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) """ super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.tgt_dict = tgt_dict self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() if eos is None else eos self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) self.token_indices_to_suppress: Optional[Tensor] = None token_indices_to_suppress = [] for token_string in tokens_to_suppress: token_index = tgt_dict.index(token_string) assert token_index != self.unk token_indices_to_suppress.append(token_index) if len(token_indices_to_suppress) > 0: self.token_indices_to_suppress = torch.Tensor( token_indices_to_suppress ).long() self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.model.set_decoder_beam_size(self.beam_size) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.max_len = max_len or self.model.max_decoder_positions() self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.temperature = temperature self.match_source_len = match_source_len if no_repeat_ngram_size > 0: self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size) else: self.repeat_ngram_blocker = None assert temperature > 0, "--temperature must be greater than 0" self.search = ( search.BeamSearch(tgt_dict) if search_strategy is None 
else search_strategy ) # We only need to set src_lengths in LengthConstrainedBeamSearch. # As a module attribute, setting it would break in multithread # settings when the model is shared. self.should_set_src_lengths = ( hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths ) self.model.eval() self.lm_model = lm_model self.lm_weight = lm_weight if self.lm_model is not None: self.lm_model.eval() def cuda(self): self.model.cuda() return self @torch.no_grad() def forward( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, bos_token: Optional[int] = None, ): """Generate a batch of translations. Args: sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, prefix_tokens, bos_token=bos_token) # TODO(myleott): unused, deprecate after pytorch-translate migration def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): """Iterate over a batched dataset and yield individual translations. Args: cuda (bool, optional): use GPU for generation timer (StopwatchMeter, optional): time generations """ for sample in data_itr: s = utils.move_to_cuda(sample) if cuda else sample if "net_input" not in s: continue input = s["net_input"] # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in input.items() if k != "prev_output_tokens" } if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if timer is not None: timer.stop(sum(len(h[0]["tokens"]) for h in hypos)) for i, id in enumerate(s["id"].data): # remove padding src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad) ref = ( utils.strip_pad(s["target"].data[i, :], self.pad) if s["target"] is not None else None ) yield id, src, ref, hypos[i] @torch.no_grad() def generate( self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs ) -> List[List[Dict[str, Tensor]]]: """Generate translations. Match the api of other fairseq generators. 
Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens constraints (torch.LongTensor, optional): force decoder to include the list of constraints bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, **kwargs) def _generate( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, constraints: Optional[Tensor] = None, bos_token: Optional[int] = None, ): incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(self.model.models_size) ], ) net_input = sample["net_input"] if "src_tokens" in net_input: src_tokens = net_input["src_tokens"] # length of the source text being the character length except EndOfSentence and pad # if src_lengths exists in net_input (speech_to_text dataset case), then use it if "src_lengths" in net_input: src_lengths = net_input["src_lengths"] else: src_lengths = ( (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)) .long() .sum(dim=1) ) elif "source" in net_input: src_tokens = net_input["source"] src_lengths = ( net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1) if net_input["padding_mask"] is not None else torch.tensor(src_tokens.size(-1)).to(src_tokens) ) elif "features" in net_input: src_tokens = net_input["features"] src_lengths = ( net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1) if net_input["padding_mask"] is not None else torch.tensor(src_tokens.size(-1)).to(src_tokens) ) else: raise Exception( "expected src_tokens or source in net input. input keys: " + str(net_input.keys()) ) # bsz: total number of sentences in beam # Note that src_tokens may have more than 2 dimensions (i.e. audio features) bsz, src_len = src_tokens.size()[:2] beam_size = self.beam_size if constraints is not None and not self.search.supports_constraints: raise NotImplementedError( "Target-side constraints were provided, but search method doesn't support them" ) # Initialize constraints, when active self.search.init_constraints(constraints, beam_size) max_len: int = -1 if self.match_source_len: max_len = src_lengths.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), self.max_len - 1, ) assert ( self.min_len <= max_len ), "min_len cannot be larger than max_len, please adjust these!" # compute the encoder output for each beam with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"): encoder_outs = self.model.forward_encoder(net_input) # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order) # ensure encoder_outs is a List. assert encoder_outs is not None # initialize buffers scores = ( torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float() ) # +1 for eos; pad is never chosen for scoring tokens = ( torch.zeros(bsz * beam_size, max_len + 2) .to(src_tokens) .long() .fill_(self.pad) ) # +2 for eos and pad tokens[:, 0] = self.eos if bos_token is None else bos_token attn: Optional[Tensor] = None # A list that indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. 
Then cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = ( torch.zeros(bsz, beam_size).to(src_tokens).eq(-1) ) # forward and backward-compatible False mask # list of completed sentences finalized = torch.jit.annotate( List[List[Dict[str, Tensor]]], [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)], ) # contains lists of dictionaries of infomation about the hypothesis being finalized at each step # a boolean array indicating if the sentence at the index is finished or not finished = [False for i in range(bsz)] num_remaining_sent = bsz # number of sentences remaining # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = ( (torch.arange(0, bsz) * beam_size) .unsqueeze(1) .type_as(tokens) .to(src_tokens.device) ) cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device) reorder_state: Optional[Tensor] = None batch_idxs: Optional[Tensor] = None original_batch_idxs: Optional[Tensor] = None if "id" in sample and isinstance(sample["id"], Tensor): original_batch_idxs = sample["id"] else: original_batch_idxs = torch.arange(0, bsz).type_as(tokens) for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as( batch_idxs ) reorder_state.view(-1, beam_size).add_( corr.unsqueeze(-1) * beam_size ) original_batch_idxs = original_batch_idxs[batch_idxs] self.model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = self.model.reorder_encoder_out( encoder_outs, reorder_state ) with torch.autograd.profiler.record_function( "EnsembleModel: forward_decoder" ): lprobs, avg_attn_scores = self.model.forward_decoder( tokens[:, : step + 1], encoder_outs, incremental_states, self.temperature, ) if self.lm_model is not None: lm_out = self.lm_model(tokens[:, : step + 1]) probs = self.lm_model.get_normalized_probs( lm_out, log_probs=True, sample=None ) probs = probs[:, -1, :] * self.lm_weight lprobs += probs lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs) lprobs[:, self.pad] = -math.inf # never select pad lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty # handle max length constraint if step >= max_len: lprobs[:, : self.eos] = -math.inf lprobs[:, self.eos + 1 :] = -math.inf # handle prefix tokens (possibly with different lengths) if ( prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len ): lprobs, tokens, scores = self._prefix_tokens( step, lprobs, scores, tokens, prefix_tokens, beam_size ) else: if step < self.min_len: # minimum length constraint (does not apply if using prefix_tokens) lprobs[:, self.eos] = -math.inf if self.token_indices_to_suppress is not None: lprobs[:, self.token_indices_to_suppress] = -math.inf # Record attention scores, only support avg_attn_scores is a Tensor if avg_attn_scores is not None: if attn is None: attn = torch.empty( bsz * beam_size, avg_attn_scores.size(1), max_len + 2 ).to(scores) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(lprobs) eos_bbsz_idx = torch.empty(0).to( tokens ) # indices of hypothesis ending with eos (finished sentences) eos_scores = torch.empty(0).to( scores ) # scores of hypothesis ending 
with eos (finished sentences) if self.should_set_src_lengths: self.search.set_src_lengths(src_lengths) if self.repeat_ngram_blocker is not None: lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step) # Shape: (batch, cand_size) cand_scores, cand_indices, cand_beams = self.search.step( step, lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], tokens[:, : step + 1], original_batch_idxs, ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos # Shape of eos_mask: (batch size, beam size) eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf) eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask) # only consider eos when it's among the top beam_size indices # Now we know what beam item(s) to finish # Shape: 1d list of absolute-numbered eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents: List[int] = [] if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = self.finalize_hypos( step, eos_bbsz_idx, eos_scores, tokens, scores, finalized, finished, beam_size, attn, src_lengths, max_len, ) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if self.search.stop_on_max_len and step >= max_len: break assert step < max_len, f"{step} < {max_len}" # Remove finalized sentences (ones for which {beam_size} # finished hypotheses have been generated) from the batch. if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = torch.ones( bsz, dtype=torch.bool, device=cand_indices.device ) batch_mask[finalized_sents] = False # TODO replace `nonzero(as_tuple=False)` after TorchScript supports it batch_idxs = torch.arange( bsz, device=cand_indices.device ).masked_select(batch_mask) # Choose the subset of the hypothesized constraints that will continue self.search.prune_sentences(batch_idxs) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) cand_scores = cand_scores[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths = src_lengths[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view( new_bsz * beam_size, attn.size(1), -1 ) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos hypos # and values < cand_size indicate candidate active hypos. # After, the min values per row are the top candidate active hypos # Rewrite the operator since the element wise or is not supported in torchscript. eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size])) active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just # the hypos with the smallest values in active_mask. 
# {active_hypos} indicates which {beam_size} hypotheses # from the list of {2 * beam_size} candidates were # selected. Shapes: (batch size, beam size) new_cands_to_ignore, active_hypos = torch.topk( active_mask, k=beam_size, dim=1, largest=False ) # update cands_to_ignore to ignore any finalized hypos. cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] # Make sure there is at least one active item for each sentence in the batch. assert (~cands_to_ignore).any(dim=1).all() # update cands_to_ignore to ignore any finalized hypos # {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam # can be selected more than once). active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos) active_scores = torch.gather(cand_scores, dim=1, index=active_hypos) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses # Set the tokens for each beam (can select the same row more than once) tokens[:, : step + 1] = torch.index_select( tokens[:, : step + 1], dim=0, index=active_bbsz_idx ) # Select the next token for each of them tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather( cand_indices, dim=1, index=active_hypos ) if step > 0: scores[:, :step] = torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx ) scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather( cand_scores, dim=1, index=active_hypos ) # Update constraints based on which candidates were selected for the next beam self.search.update_constraints(active_hypos) # copy attention for active hypotheses if attn is not None: attn[:, :, : step + 2] = torch.index_select( attn[:, :, : step + 2], dim=0, index=active_bbsz_idx ) # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): scores = torch.tensor( [float(elem["score"].item()) for elem in finalized[sent]] ) _, sorted_scores_indices = torch.sort(scores, descending=True) finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices] finalized[sent] = torch.jit.annotate( List[Dict[str, Tensor]], finalized[sent] ) return finalized def _prefix_tokens( self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int ): """Handle prefix tokens""" prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1)) prefix_mask = prefix_toks.ne(self.pad) lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs) lprobs[prefix_mask] = lprobs[prefix_mask].scatter( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask] ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[ :, 0, 1 : step + 1 ] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() # copy tokens, scores and lprobs from the first beam to all beams tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size) scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size) lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size) return lprobs, tokens, scores def replicate_first_beam(self, tensor, mask, beam_size: int): tensor = tensor.view(-1, beam_size, 
tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) def finalize_hypos( self, step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[str, Tensor]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int, ): """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly. A sentence is finalized when {beam_size} finished items have been collected for it. Returns number of sentences (not beam items) being finalized. These will be removed from the batch and not processed further. Args: bbsz_idx (Tensor): """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors. # tokens is (batch * beam, max_len). So the index_select # gets the newly EOS rows, then selects cols 1..{step + 2} tokens_clone = tokens.index_select(0, bbsz_idx)[ :, 1 : step + 2 ] # skip the first index, which is EOS tokens_clone[:, step] = self.eos attn_clone = ( attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2] if attn is not None else None ) # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: eos_scores /= (step + 1) ** self.len_penalty # cum_unfin records which sentences in the batch are finished. # It helps match indexing between (a) the original sentences # in the batch and (b) the current, possibly-reduced set of # sentences. cum_unfin: List[int] = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx) unfin_idx = torch.div(bbsz_idx, beam_size, rounding_mode="trunc") sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx) # Create a set of "{sent}{unfin_idx}", where # "unfin_idx" is the index in the current (possibly reduced) # list of sentences, and "sent" is the index in the original, # unreduced batch # For every finished beam item # sentence index in the current (possibly reduced) batch seen = (sent << 32) + unfin_idx unique_seen: List[int] = torch.unique(seen).tolist() if self.match_source_len: condition = step > torch.index_select(src_lengths, 0, unfin_idx) eos_scores = torch.where(condition, torch.tensor(-math.inf), eos_scores) sent_list: List[int] = sent.tolist() for i in range(bbsz_idx.size()[0]): # An input sentence (among those in a batch) is finished when # beam_size hypotheses have been collected for it if len(finalized[sent_list[i]]) < beam_size: if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i] else: hypo_attn = torch.empty(0) finalized[sent_list[i]].append( { "tokens": tokens_clone[i], "score": eos_scores[i], "attention": hypo_attn, # src_len x tgt_len "alignment": torch.empty(0), "positional_scores": pos_scores[i], } ) newly_finished: List[int] = [] for unique_s in unique_seen: # check termination conditions for this sentence unique_sent: int = unique_s >> 32 unique_unfin_idx: int = unique_s - (unique_sent << 32) if not finished[unique_sent] and self.is_finished( step, unique_unfin_idx, max_len, len(finalized[unique_sent]), beam_size ): finished[unique_sent] = True newly_finished.append(unique_unfin_idx) return newly_finished def is_finished( self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int, ): """ Check whether 
decoding for a sentence is finished, which occurs when the list of finalized sentences has reached the beam size, or when we reach the maximum length. """ assert finalized_sent_len <= beam_size if finalized_sent_len == beam_size or step == max_len: return True return False class EnsembleModel(nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models_size = len(models) # method '__len__' is not supported in ModuleList for torch script self.single_model = models[0] self.models = nn.ModuleList(models) self.has_incremental: bool = False if all( hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder) for m in models ): self.has_incremental = True def forward(self): pass def has_encoder(self): return hasattr(self.single_model, "encoder") def has_incremental_states(self): return self.has_incremental def max_decoder_positions(self): return min( [ m.max_decoder_positions() for m in self.models if hasattr(m, "max_decoder_positions") ] + [sys.maxsize] ) def set_decoder_beam_size(self, beam_size): """Set beam size for efficient beamable enc-dec attention.""" if beam_size > 1: for model in self.models: if hasattr(model, "set_beam_size"): model.set_beam_size(beam_size) @torch.jit.export def forward_encoder(self, net_input: Dict[str, Tensor]): if not self.has_encoder(): return None return [model.encoder.forward_torchscript(net_input) for model in self.models] @torch.jit.export def forward_decoder( self, tokens, encoder_outs: List[Dict[str, List[Tensor]]], incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], temperature: float = 1.0, ): log_probs = [] avg_attn: Optional[Tensor] = None encoder_out: Optional[Dict[str, List[Tensor]]] = None for i, model in enumerate(self.models): if self.has_encoder(): encoder_out = encoder_outs[i] # decode each model if self.has_incremental_states(): decoder_out = model.decoder.forward( tokens, encoder_out=encoder_out, incremental_state=incremental_states[i], ) else: if hasattr(model, "decoder"): decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out) else: decoder_out = model.forward(tokens) attn: Optional[Tensor] = None decoder_len = len(decoder_out) if decoder_len > 1 and decoder_out[1] is not None: if isinstance(decoder_out[1], Tensor): attn = decoder_out[1] else: attn_holder = decoder_out[1]["attn"] if isinstance(attn_holder, Tensor): attn = attn_holder elif attn_holder is not None: attn = attn_holder[0] if attn is not None: attn = attn[:, -1, :] decoder_out_tuple = ( decoder_out[0][:, -1:, :].div_(temperature), None if decoder_len <= 1 else decoder_out[1], ) probs = model.get_normalized_probs( decoder_out_tuple, log_probs=True, sample=None ) probs = probs[:, -1, :] if self.models_size == 1: return probs, attn log_probs.append(probs) if attn is not None: if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log( self.models_size ) if avg_attn is not None: avg_attn.div_(self.models_size) return avg_probs, avg_attn @torch.jit.export def reorder_encoder_out( self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order ): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_outs: List[Dict[str, List[Tensor]]] = [] if not self.has_encoder(): return new_outs for i, model in enumerate(self.models): assert encoder_outs is not None new_outs.append( model.encoder.reorder_encoder_out(encoder_outs[i], new_order) ) return new_outs @torch.jit.export def reorder_incremental_state( self, incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): if not self.has_incremental_states(): return for i, model in enumerate(self.models): model.decoder.reorder_incremental_state_scripting( incremental_states[i], new_order ) class SequenceGeneratorWithAlignment(SequenceGenerator): def __init__( self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs ): """Generates translations of a given source sentence. Produces alignments following "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: left_pad_target (bool, optional): Whether or not the hypothesis should be left padded or not when they are teacher forced for generating alignments. """ super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs) self.left_pad_target = left_pad_target if print_alignment == "hard": self.extract_alignment = utils.extract_hard_alignment elif print_alignment == "soft": self.extract_alignment = utils.extract_soft_alignment @torch.no_grad() def generate(self, models, sample, **kwargs): finalized = super()._generate(sample, **kwargs) src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] beam_size = self.beam_size ( src_tokens, src_lengths, prev_output_tokens, tgt_tokens, ) = self._prepare_batch_for_alignment(sample, finalized) if any(getattr(m, "full_context_alignment", False) for m in self.model.models): attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens) else: attn = [ finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0) for i in range(bsz * beam_size) ] if src_tokens.device != "cpu": src_tokens = src_tokens.to("cpu") tgt_tokens = tgt_tokens.to("cpu") attn = [i.to("cpu") for i in attn] # Process the attn matrix to extract hard alignments. 
for i in range(bsz * beam_size): alignment = self.extract_alignment( attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos ) finalized[i // beam_size][i % beam_size]["alignment"] = alignment return finalized def _prepare_batch_for_alignment(self, sample, hypothesis): src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] src_tokens = ( src_tokens[:, None, :] .expand(-1, self.beam_size, -1) .contiguous() .view(bsz * self.beam_size, -1) ) src_lengths = sample["net_input"]["src_lengths"] src_lengths = ( src_lengths[:, None] .expand(-1, self.beam_size) .contiguous() .view(bsz * self.beam_size) ) prev_output_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True, ) tgt_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False, ) return src_tokens, src_lengths, prev_output_tokens, tgt_tokens class EnsembleModelWithAlignment(EnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) def forward_align(self, src_tokens, src_lengths, prev_output_tokens): avg_attn = None for model in self.models: decoder_out = model(src_tokens, src_lengths, prev_output_tokens) attn = decoder_out[1]["attn"][0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(self.models) > 1: avg_attn.div_(len(self.models)) return avg_attn
EXA-1-master
exa/libraries/fairseq/fairseq/sequence_generator.py
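Illustrative note (not part of the repository dump): the beam-search loop above keeps every hypothesis in tensors flattened to shape (bsz * beam_size, ...), and bbsz_offsets converts per-sentence beam indices into rows of that flattened layout. A minimal sketch with assumed toy values:

import torch

bsz, beam_size = 2, 3
cand_size = 2 * beam_size  # 2x beam in case half the candidates are EOS

# offset of each sentence's first beam item in the flattened (bsz * beam_size) layout
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1)  # shape: (bsz, 1)

# pretend the search step returned these per-candidate beam indices in [0, beam_size)
cand_beams = torch.tensor([[0, 2, 1, 0, 1, 2],
                           [1, 1, 0, 2, 2, 0]])

# absolute row indices into any tensor of shape (bsz * beam_size, ...)
cand_bbsz_idx = cand_beams + bbsz_offsets
print(cand_bbsz_idx)
# tensor([[0, 2, 1, 0, 1, 2],
#         [4, 4, 3, 5, 5, 3]])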
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import multiprocessing
import os
import pdb
import sys

__all__ = ["set_trace"]


_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
    _stdin_fd = sys.stdin.fileno()
except Exception:
    _stdin_fd = None


class MultiprocessingPdb(pdb.Pdb):
    """A Pdb wrapper that works in a multiprocessing environment.

    Usage: `from fairseq import pdb; pdb.set_trace()`
    """

    def __init__(self):
        pdb.Pdb.__init__(self, nosigint=True)

    def _cmdloop(self):
        stdin_bak = sys.stdin
        with _stdin_lock:
            try:
                if _stdin_fd is not None:
                    if not _stdin[0]:
                        _stdin[0] = os.fdopen(_stdin_fd)
                    sys.stdin = _stdin[0]
                self.cmdloop()
            finally:
                sys.stdin = stdin_bak


def set_trace():
    pdb = MultiprocessingPdb()
    pdb.set_trace(sys._getframe().f_back)
EXA-1-master
exa/libraries/fairseq/fairseq/pdb.py
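A hedged usage sketch for the multiprocessing-safe debugger above. The worker function and the two-process spawn are illustrative, only fairseq.pdb.set_trace comes from the module itself; it assumes the workers still share an interactive stdin (the situation the module's lock is written for), so under other launchers the breakpoint may not get a usable terminal:

import torch.multiprocessing as mp


def worker(rank):
    from fairseq import pdb

    if rank == 0:
        # break only in one worker; the module-level lock serializes access to the shared stdin
        pdb.set_trace()


if __name__ == "__main__":
    mp.spawn(worker, nprocs=2)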
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import re


SPACE_NORMALIZER = re.compile(r"\s+")


def tokenize_line(line):
    line = SPACE_NORMALIZER.sub(" ", line)
    line = line.strip()
    return line.split()
EXA-1-master
exa/libraries/fairseq/fairseq/tokenizer.py
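A quick illustration of the whitespace-normalizing tokenizer above (assumes fairseq is importable; the input string is arbitrary):

from fairseq.tokenizer import tokenize_line

print(tokenize_line("  hello \t world\n"))  # ['hello', 'world']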
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import logging import os from typing import Any, Dict, Iterator, List import torch from omegaconf import open_dict from torch import nn from fairseq import utils from fairseq.data import encoders logger = logging.getLogger(__name__) def from_pretrained( model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", archive_map=None, **kwargs ): from fairseq import checkpoint_utils, file_utils if archive_map is not None: if model_name_or_path in archive_map: model_name_or_path = archive_map[model_name_or_path] if data_name_or_path is not None and data_name_or_path in archive_map: data_name_or_path = archive_map[data_name_or_path] # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe) # for each model if isinstance(model_name_or_path, dict): for k, v in model_name_or_path.items(): if k == "checkpoint_file": checkpoint_file = v elif ( k != "path" # only set kwargs that don't already have overrides and k not in kwargs ): kwargs[k] = v model_name_or_path = model_name_or_path["path"] model_path = file_utils.load_archive_file(model_name_or_path) # convenience hack for loading data and BPE codes from model archive if data_name_or_path.startswith("."): kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path)) else: kwargs["data"] = file_utils.load_archive_file(data_name_or_path) for file, arg in { "code": "bpe_codes", "bpecodes": "bpe_codes", "sentencepiece.bpe.model": "sentencepiece_model", "merges.txt": "bpe_merges", "vocab.json": "bpe_vocab", }.items(): path = os.path.join(model_path, file) if os.path.exists(path): kwargs[arg] = path if "user_dir" in kwargs: utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"])) model_path = [ os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep) ] if "is_vocoder" in kwargs: args = {"data": kwargs["data"], "model_path": model_path} task = None models = None else: models, args, task = checkpoint_utils.load_model_ensemble_and_task( model_path, arg_overrides=kwargs, ) if "generation_args" in kwargs and kwargs["generation_args"]: for key in kwargs["generation_args"]: setattr(args["generation"], key, kwargs["generation_args"][key]) return { "args": args, "task": task, "models": models, } class GeneratorHubInterface(nn.Module): """ PyTorch Hub interface for generating sequences from a pre-trained translation or language model. 
""" def __init__(self, cfg, task, models): super().__init__() self.cfg = cfg self.task = task self.models = nn.ModuleList(models) self.src_dict = task.source_dictionary self.tgt_dict = task.target_dictionary # optimize model for generation for model in self.models: model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) self.align_dict = utils.load_align_dict(cfg.generation.replace_unk) self.tokenizer = encoders.build_tokenizer(cfg.tokenizer) self.bpe = encoders.build_bpe(cfg.bpe) self.max_positions = utils.resolve_max_positions( self.task.max_positions(), *[model.max_positions() for model in models] ) # this is useful for determining the device self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def translate( self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs ) -> List[str]: return self.sample(sentences, beam, verbose, **kwargs) def sample( self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs ) -> List[str]: if isinstance(sentences, str): return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0] tokenized_sentences = [self.encode(sentence) for sentence in sentences] batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs) return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos] def score( self, sentences: List[str], replace_newline_with_eos: bool = False, **kwargs ): if isinstance(sentences, str): return self.score( [sentences], replace_newline_with_eos=replace_newline_with_eos, **kwargs )[0] def encode(sentence): if replace_newline_with_eos: return torch.cat([self.encode(line) for line in sentence.splitlines()]) else: return self.encode(sentence) # NOTE: this doesn't support translation tasks currently tokenized_sentences = [encode(sentence) for sentence in sentences] return [ hypos[0] for hypos in self.generate( tokenized_sentences, score_reference=True, **kwargs ) ] def generate( self, tokenized_sentences: List[torch.LongTensor], beam: int = 5, verbose: bool = False, skip_invalid_size_inputs=False, inference_step_args=None, prefix_allowed_tokens_fn=None, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1: return self.generate( tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs )[0] # build generator using current args as well as any kwargs gen_args = copy.deepcopy(self.cfg.generation) with open_dict(gen_args): gen_args.beam = beam for k, v in kwargs.items(): setattr(gen_args, k, v) generator = self.task.build_generator( self.models, gen_args, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, ) inference_step_args = inference_step_args or {} results = [] for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs): batch = utils.apply_to_sample(lambda t: t.to(self.device), batch) translations = self.task.inference_step( generator, self.models, batch, **inference_step_args ) for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) # sort output to match input order outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] if verbose: def getarg(name, default): return getattr(gen_args, name, getattr(self.cfg, name, default)) for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs): src_str_with_unk = self.string(source_tokens) 
logger.info("S\t{}".format(src_str_with_unk)) for hypo in target_hypotheses: hypo_str = self.decode(hypo["tokens"]) logger.info("H\t{}\t{}".format(hypo["score"], hypo_str)) logger.info( "P\t{}".format( " ".join( map( lambda x: "{:.4f}".format(x), hypo["positional_scores"].tolist(), ) ) ) ) if hypo["alignment"] is not None and getarg( "print_alignment", False ): logger.info( "A\t{}".format( " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo["alignment"] ] ) ) ) return outputs def encode(self, sentence: str) -> torch.LongTensor: sentence = self.tokenize(sentence) sentence = self.apply_bpe(sentence) return self.binarize(sentence) def decode(self, tokens: torch.LongTensor) -> str: sentence = self.string(tokens) sentence = self.remove_bpe(sentence) return self.detokenize(sentence) def tokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.encode(sentence) return sentence def detokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.decode(sentence) return sentence def apply_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.encode(sentence) return sentence def remove_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.decode(sentence) return sentence def binarize(self, sentence: str) -> torch.LongTensor: return self.src_dict.encode_line(sentence, add_if_not_exist=False).long() def string(self, tokens: torch.LongTensor) -> str: return self.tgt_dict.string(tokens) def _build_batches( self, tokens: List[List[int]], skip_invalid_size_inputs: bool ) -> Iterator[Dict[str, Any]]: lengths = torch.LongTensor([t.numel() for t in tokens]) batch_iterator = self.task.get_batch_iterator( dataset=self.task.build_dataset_for_inference(tokens, lengths), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=self.max_positions, ignore_invalid_inputs=skip_invalid_size_inputs, disable_iterator_cache=True, ).next_epoch_itr(shuffle=False) return batch_iterator class BPEHubInterface(object): """PyTorch Hub interface for Byte-Pair Encoding (BPE).""" def __init__(self, bpe, **kwargs): super().__init__() args = argparse.Namespace(bpe=bpe, **kwargs) self.bpe = encoders.build_bpe(args) assert self.bpe is not None def encode(self, sentence: str) -> str: return self.bpe.encode(sentence) def decode(self, sentence: str) -> str: return self.bpe.decode(sentence) class TokenizerHubInterface(object): """PyTorch Hub interface for tokenization.""" def __init__(self, tokenizer, **kwargs): super().__init__() args = argparse.Namespace(tokenizer=tokenizer, **kwargs) self.tokenizer = encoders.build_tokenizer(args) assert self.tokenizer is not None def encode(self, sentence: str) -> str: return self.tokenizer.encode(sentence) def decode(self, sentence: str) -> str: return self.tokenizer.decode(sentence)
EXA-1-master
exa/libraries/fairseq/fairseq/hub_utils.py
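A usage sketch for GeneratorHubInterface via torch.hub, following the pattern fairseq documents for its released translation checkpoints; it assumes network access and that the moses/fastbpe extras are installed:

import torch

# downloads the published WMT'19 en-de checkpoint and wraps it in GeneratorHubInterface
en2de = torch.hub.load(
    "pytorch/fairseq",
    "transformer.wmt19.en-de.single_model",
    tokenizer="moses",
    bpe="fastbpe",
)
en2de.eval()
print(en2de.translate("Hello world!", beam=5))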
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import torch from fairseq import utils class SequenceScorer(object): """Scores the target for a given source sentence.""" def __init__( self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None, symbols_to_strip_from_output=None, ): self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() if eos is None else eos self.softmax_batch = softmax_batch or sys.maxsize assert self.softmax_batch > 0 self.compute_alignment = compute_alignment self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) @torch.no_grad() def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample["net_input"] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample["target"] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model(**net_input) attn = decoder_out[1] if len(decoder_out) > 1 else None if type(attn) is dict: attn = attn.get("attn", None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample["target"] = tgt curr_prob = model.get_normalized_probs( bd, log_probs=len(models) == 1, sample=sample ).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs( curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt ) probs[idx:end] = tgt_probs.view(-1) idx = end sample["target"] = orig_target probs = probs.view(sample["target"].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None: if torch.is_tensor(attn): attn = attn.data else: attn = attn[0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = ( utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad) if sample["target"] is not None else None ) tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] if self.compute_alignment: alignment = utils.extract_hard_alignment( avg_attn_i, sample["net_input"]["src_tokens"][i], sample["target"][i], self.pad, self.eos, ) else: alignment = None else: avg_attn_i = alignment = None hypos.append( [ { "tokens": ref, "score": score_i, "attention": avg_attn_i, "alignment": alignment, "positional_scores": 
avg_probs_i, } ] ) return hypos
EXA-1-master
exa/libraries/fairseq/fairseq/sequence_scorer.py
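A toy sketch (assumed shapes and values, independent of fairseq) of the gather that SequenceScorer.generate performs per model: pick log p(target_t) out of the (bsz, tgt_len, vocab) distribution, then average over the target tokens to get the sentence score:

import torch

bsz, tgt_len, vocab = 1, 3, 5
lprobs = torch.log_softmax(torch.randn(bsz, tgt_len, vocab), dim=-1)
target = torch.tensor([[2, 0, 4]])

tok_scores = lprobs.gather(2, target.unsqueeze(-1)).squeeze(-1)  # (bsz, tgt_len)
sent_score = tok_scores.sum(dim=1) / tgt_len  # mean log-prob per target token
print(tok_scores, sent_score)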
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import uuid
from typing import Dict, Optional

from torch import Tensor


class FairseqIncrementalState(object):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.init_incremental_state()

    def init_incremental_state(self):
        self._incremental_state_id = str(uuid.uuid4())

    def _get_full_incremental_state_key(self, key: str) -> str:
        return "{}.{}".format(self._incremental_state_id, key)

    def get_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
    ) -> Optional[Dict[str, Optional[Tensor]]]:
        """Helper for getting incremental state for an nn.Module."""
        full_key = self._get_full_incremental_state_key(key)
        if incremental_state is None or full_key not in incremental_state:
            return None
        return incremental_state[full_key]

    def set_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
        value: Dict[str, Optional[Tensor]],
    ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
        """Helper for setting incremental state for an nn.Module."""
        if incremental_state is not None:
            full_key = self._get_full_incremental_state_key(key)
            incremental_state[full_key] = value
        return incremental_state


def with_incremental_state(cls):
    cls.__bases__ = (FairseqIncrementalState,) + tuple(
        b for b in cls.__bases__ if b != FairseqIncrementalState
    )
    return cls
EXA-1-master
exa/libraries/fairseq/fairseq/incremental_decoding_utils.py
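A minimal sketch of how a module opts into per-instance incremental state with the decorator above; MyAttention, the "attn_state" key, and the "prev_key" entry are made-up names for illustration:

from typing import Dict, Optional

import torch
from torch import Tensor

from fairseq.incremental_decoding_utils import with_incremental_state


@with_incremental_state
class MyAttention(torch.nn.Module):
    def cache_key(self, incremental_state, key: Tensor):
        # stored under a key namespaced by this instance's uuid
        self.set_incremental_state(incremental_state, "attn_state", {"prev_key": key})

    def cached_key(self, incremental_state) -> Optional[Tensor]:
        saved = self.get_incremental_state(incremental_state, "attn_state")
        return None if saved is None else saved["prev_key"]


state: Dict[str, Dict[str, Optional[Tensor]]] = {}
m = MyAttention()
m.cache_key(state, torch.ones(2, 4))
print(m.cached_key(state).shape)  # torch.Size([2, 4])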
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import collections import contextlib import copy import importlib import logging import os import sys import warnings from itertools import accumulate from typing import TYPE_CHECKING, Callable, Dict, List, Optional import torch import torch.nn.functional as F from torch import Tensor if TYPE_CHECKING: from fairseq.modules.multihead_attention import MultiheadAttention try: from amp_C import multi_tensor_l2norm multi_tensor_l2norm_available = True except ImportError: multi_tensor_l2norm_available = False try: import torch_xla.core.xla_model as xm except ImportError: xm = None logger = logging.getLogger(__name__) MANIFOLD_PATH_SEP = "|" class FileContentsAction(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") super(FileContentsAction, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): from fairseq.file_io import PathManager if PathManager.isfile(values): with PathManager.open(values) as f: argument = f.read().strip() else: argument = values setattr(namespace, self.dest, argument) def split_paths(paths: str, separator=os.pathsep) -> List[str]: return ( paths.split(separator) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP) ) def load_ensemble_for_inference(filenames, task, model_arg_overrides=None): from fairseq import checkpoint_utils deprecation_warning( "utils.load_ensemble_for_inference is deprecated. " "Please use checkpoint_utils.load_model_ensemble instead." ) return checkpoint_utils.load_model_ensemble( filenames, arg_overrides=model_arg_overrides, task=task ) def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, collections.OrderedDict): # OrderedDict has attributes that needs to be preserved od = collections.OrderedDict( (key, _apply(value)) for key, value in x.items() ) od.__dict__ = x.__dict__ return od elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cuda(sample, device=None): device = device or torch.cuda.current_device() def _move_to_cuda(tensor): # non_blocking is ignored if tensor is not pinned, so we can always set # to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620) return tensor.to(device=device, non_blocking=True) return apply_to_sample(_move_to_cuda, sample) def move_to_cpu(sample): def _move_to_cpu(tensor): # PyTorch has poor support for half tensors (float16) on CPU. # Move any such tensors to float32. 
if tensor.dtype in {torch.bfloat16, torch.float16}: tensor = tensor.to(dtype=torch.float32) return tensor.cpu() return apply_to_sample(_move_to_cpu, sample) def move_to_tpu(sample): import torch_xla.core.xla_model as xm device = xm.xla_device() def _move_to_tpu(tensor): return tensor.to(device) return apply_to_sample(_move_to_tpu, sample) def get_incremental_state( module: "MultiheadAttention", incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" return module.get_incremental_state(incremental_state, key) def set_incremental_state( module: "MultiheadAttention", incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: result = module.set_incremental_state(incremental_state, key, value) if result is not None: incremental_state = result return incremental_state def load_align_dict(replace_unk): if replace_unk is None: align_dict = None elif isinstance(replace_unk, str) and len(replace_unk) > 0: # Load alignment dictionary for unknown word replacement if it was passed as an argument. align_dict = {} with open(replace_unk, "r") as f: for line in f: cols = line.split() align_dict[cols[0]] = cols[1] else: # No alignment dictionary provided but we still want to perform unknown word replacement by copying the # original source word. align_dict = {} return align_dict def print_embed_overlap(embed_dict, vocab_dict): embed_keys = set(embed_dict.keys()) vocab_keys = set(vocab_dict.symbols) overlap = len(embed_keys & vocab_keys) logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict))) def parse_embedding(embed_path): """Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 """ embed_dict = {} with open(embed_path) as f_embed: next(f_embed) # skip header for line in f_embed: pieces = line.rstrip().split(" ") embed_dict[pieces[0]] = torch.Tensor( [float(weight) for weight in pieces[1:]] ) return embed_dict def load_embedding(embed_dict, vocab, embedding): for idx in range(len(vocab)): token = vocab[idx] if token in embed_dict: embedding.weight.data[idx] = embed_dict[token] return embedding def replace_unk(hypo_str, src_str, alignment, align_dict, unk): from fairseq import tokenizer # Tokens are strings here hypo_tokens = tokenizer.tokenize_line(hypo_str) # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"] for i, ht in enumerate(hypo_tokens): if ht == unk: src_token = src_tokens[alignment[i]] # Either take the corresponding value in the aligned dictionary or just copy the original value. 
hypo_tokens[i] = align_dict.get(src_token, src_token) return " ".join(hypo_tokens) def post_process_prediction( hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None, ): hypo_str = tgt_dict.string( hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore ) if align_dict is not None: hypo_str = replace_unk( hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string() ) if align_dict is not None or remove_bpe is not None: # Convert back to tokens for evaluating with unk replacement or without BPE # Note that the dictionary can be modified inside the method. hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True) return hypo_tokens, hypo_str, alignment def make_positions(tensor, padding_idx: int, onnx_trace: bool = False): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def strip_pad(tensor, pad): return tensor[tensor.ne(pad)] def buffered_arange(max, device="cpu"): if not hasattr(buffered_arange, "buf"): buffered_arange.buf = torch.LongTensor().to(device) if max > buffered_arange.buf.numel(): buffered_arange.buf.resize_(max) torch.arange(max, out=buffered_arange.buf) return buffered_arange.buf[:max] def convert_padding_direction( src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False ): assert right_to_left ^ left_to_right pad_mask = src_tokens.eq(padding_idx) if not pad_mask.any(): # no padding, return early return src_tokens if left_to_right and not pad_mask[:, 0].any(): # already right padded return src_tokens if right_to_left and not pad_mask[:, -1].any(): # already left padded return src_tokens max_len = src_tokens.size(1) buffered = torch.empty(0).long() if max_len > 0: torch.arange(max_len, out=buffered) range = buffered.type_as(src_tokens).expand_as(src_tokens) num_pads = pad_mask.long().sum(dim=1, keepdim=True) if right_to_left: index = torch.remainder(range - num_pads, max_len) else: index = torch.remainder(range + num_pads, max_len) return src_tokens.gather(1, index) def item(tensor): # tpu-comment: making this a no-op for xla devices. 
if torch.is_tensor(tensor) and tensor.device.type == "xla": return tensor.detach() if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor: per_device_grads = {} norms = [] for grad in grads: device = grad.device cur_device_grads = per_device_grads.get(device) if cur_device_grads is None: cur_device_grads = [] per_device_grads[device] = cur_device_grads cur_device_grads.append(grad) for device in per_device_grads.keys(): cur_device_grads = per_device_grads[device] if device.type == "cuda": # TODO(msb) return has_inf has_inf = torch.zeros((1, 1), dtype=torch.int, device=device) with torch.cuda.device(device): norm = multi_tensor_l2norm( chunk_size, has_inf, [cur_device_grads], False ) norms.append(norm[0].to(torch.cuda.current_device())) else: norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads] total_norm = torch.norm(torch.stack(norms)) return total_norm @torch.no_grad() def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor: def grad_exists(p): return p is not None and getattr(p, "grad", None) is not None if isinstance(params, torch.Tensor): params = [params] params = list(params) grads = [ p.grad.detach() for p in params if grad_exists(p) and not hasattr(p, "expert") ] expert_grads = [ p.grad.detach() for p in params if grad_exists(p) and hasattr(p, "expert") ] if len(grads) == 0: if len(params) > 0: return params[0].new_tensor(0.0) else: return torch.tensor(0.0) if len(grads) == 1: total_norm = torch.norm(grads[0], p=2, dtype=torch.float32) else: if multi_tensor_l2norm_available: total_norm = multi_tensor_total_norm(grads) else: if torch.cuda.is_available(): warnings.warn( "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; " "you may get better performance by installing NVIDIA's apex library" ) device = torch.cuda.current_device() elif grads[0].device.type == "xla": device = grads[0].device else: device = torch.device("cpu") total_norm = torch.norm( torch.stack( [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads] ) ) if aggregate_norm_fn is not None: total_norm = aggregate_norm_fn(total_norm) if max_norm > 0: max_norm = float(max_norm) clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1) torch._foreach_mul_(grads + expert_grads, clip_coef) return total_norm def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float("-inf")).type_as(t) def _match_types(arg1, arg2): """Convert the numerical argument to the same type as the other argument""" def upgrade(arg_number, arg_structure): if isinstance(arg_structure, tuple): return tuple([arg_number] * len(arg_structure)) elif isinstance(arg_structure, dict): arg = copy.deepcopy(arg_structure) for k in arg: arg[k] = upgrade(arg_number, arg_structure[k]) return arg else: return arg_number if isinstance(arg1, float) or isinstance(arg1, int): return upgrade(arg1, arg2), arg2 elif isinstance(arg2, float) or isinstance(arg2, int): return arg1, upgrade(arg2, arg1) return arg1, arg2 def resolve_max_positions(*args): """Resolve max position constraints from multiple sources.""" def map_value_update(d1, d2): updated_value = copy.deepcopy(d1) for key in d2: if key not in updated_value: updated_value[key] = d2[key] else: updated_value[key] = min(d1[key], d2[key]) return updated_value def nullsafe_min(l): minim = None for item in l: if minim is None: minim = item elif item is not None and 
item < minim: minim = item return minim max_positions = None for arg in args: if max_positions is None: max_positions = arg elif arg is not None: max_positions, arg = _match_types(max_positions, arg) if isinstance(arg, float) or isinstance(arg, int): max_positions = min(max_positions, arg) elif isinstance(arg, dict): max_positions = map_value_update(max_positions, arg) else: max_positions = tuple(map(nullsafe_min, zip(max_positions, arg))) return max_positions def import_user_module(args): module_path = getattr(args, "user_dir", None) if module_path is not None: module_path = os.path.abspath(args.user_dir) if not os.path.exists(module_path) and not os.path.isfile( os.path.dirname(module_path) ): fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: fairseq_rel_path = os.path.join( os.path.dirname(__file__), "..", args.user_dir ) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: raise FileNotFoundError(module_path) # ensure that user modules are only imported once import_user_module.memo = getattr(import_user_module, "memo", set()) if module_path not in import_user_module.memo: import_user_module.memo.add(module_path) module_parent, module_name = os.path.split(module_path) if module_name not in sys.modules: sys.path.insert(0, module_parent) importlib.import_module(module_name) tasks_path = os.path.join(module_path, "tasks") if os.path.exists(tasks_path): from fairseq.tasks import import_tasks import_tasks(tasks_path, f"{module_name}.tasks") models_path = os.path.join(module_path, "models") if os.path.exists(models_path): from fairseq.models import import_models import_models(models_path, f"{module_name}.models") elif module_path in sys.modules[module_name].__path__: logger.info(f"--user-dir={module_path} has already been imported.") else: raise ImportError( "Failed to import --user-dir={} because the corresponding module name " "({}) is not globally unique. 
Please rename the directory to " "something unique and try again.".format(module_path, module_name) ) def softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32) def log_softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.log_softmax(x.float(), dim=dim) else: return F.log_softmax(x, dim=dim, dtype=torch.float32) def get_perplexity(loss, round=2, base=2): from fairseq.logging.meters import safe_round if loss is None: return 0.0 try: return safe_round(base**loss, round) except OverflowError: return float("inf") def deprecation_warning(message, stacklevel=3): # don't use DeprecationWarning, since it's ignored by default warnings.warn(message, stacklevel=stacklevel) def relu_squared(x: torch.Tensor): return F.relu(x).pow(2) def get_activation_fn(activation: str) -> Callable: """Returns the activation function corresponding to `activation`""" from fairseq.modules import gelu, gelu_accurate if activation == "relu": return F.relu elif activation == "relu_squared": return relu_squared elif activation == "gelu": return gelu elif activation == "gelu_fast": deprecation_warning( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x elif activation == "swish": return torch.nn.SiLU else: raise RuntimeError("--activation-fn {} not supported".format(activation)) def get_available_activation_fns() -> List: return [ "relu", "gelu", "gelu_fast", # deprecated "gelu_accurate", "tanh", "linear", ] @contextlib.contextmanager def model_eval(model): is_training = model.training model.eval() yield model.train(is_training) def has_parameters(module): try: next(module.parameters()) return True except StopIteration: return False def get_rng_state(): state = {"torch_rng_state": torch.get_rng_state()} if xm is not None: state["xla_rng_state"] = xm.get_rng_state() if torch.cuda.is_available(): state["cuda_rng_state"] = torch.cuda.get_rng_state() return state def set_rng_state(state): torch.set_rng_state(state["torch_rng_state"]) if xm is not None: xm.set_rng_state(state["xla_rng_state"]) if torch.cuda.is_available(): torch.cuda.set_rng_state(state["cuda_rng_state"]) class set_torch_seed(object): def __init__(self, seed): assert isinstance(seed, int) self.rng_state = get_rng_state() torch.manual_seed(seed) if xm is not None: xm.set_rng_state(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) def __enter__(self): return self def __exit__(self, *exc): set_rng_state(self.rng_state) def parse_alignment(line): """ Parses a single line from the alingment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m). 
""" alignments = line.strip().split() parsed_alignment = torch.IntTensor(2 * len(alignments)) for idx, alignment in enumerate(alignments): src_idx, tgt_idx = alignment.split("-") parsed_alignment[2 * idx] = int(src_idx) parsed_alignment[2 * idx + 1] = int(tgt_idx) return parsed_alignment def get_token_to_word_mapping(tokens, exclude_list): n = len(tokens) word_start = [int(token not in exclude_list) for token in tokens] word_idx = list(accumulate(word_start)) token_to_word = {i: word_idx[i] for i in range(n)} return token_to_word def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_invalid = ( ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad]) tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad]) alignment = [] if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent): attn_valid = attn[tgt_valid] attn_valid[:, src_invalid] = float("-inf") _, src_indices = attn_valid.max(dim=1) for tgt_idx, src_idx in zip(tgt_valid, src_indices): alignment.append( ( src_token_to_word[src_idx.item()] - 1, tgt_token_to_word[tgt_idx.item()] - 1, ) ) return alignment def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ((tgt_sent != pad)).nonzero(as_tuple=False) src_valid = ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1) alignment = [] if len(tgt_valid) != 0 and len(src_valid) != 0: attn_valid = attn[tgt_valid, src_valid] alignment = [ ["{:.6f}".format(p) for p in src_probs.tolist()] for src_probs in attn_valid ] return alignment def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. 
""" if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def get_tpu_device(): return xm.xla_device() def tpu_data_loader(itr): import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl from fairseq.data import iterators xm.rendezvous("tpu_data_loader") # wait for all workers xm.mark_step() device = xm.xla_device() return iterators.CountingIterator( pl.ParallelLoader(itr, [device]).per_device_loader(device), start=getattr(itr, "n", 0), total=len(itr), ) def is_xla_tensor(tensor): return torch.is_tensor(tensor) and tensor.device.type == "xla" def index_put(tensor, indices, value): if is_xla_tensor(tensor): for _ in range(indices.dim(), tensor.dim()): indices = indices.unsqueeze(-1) if indices.size(-1) < tensor.size(-1): indices = indices.expand_as(tensor) tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices) else: tensor[indices] = value return tensor def xla_device_to_cpu(dat): import torch_xla.core.xla_model as xm return xm._maybe_convert_to_cpu(dat) class CudaEnvironment(object): def __init__(self): cur_device = torch.cuda.current_device() prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device)) self.name = prop.name self.major = prop.major self.minor = prop.minor self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024 @staticmethod def pretty_print_cuda_env_list(cuda_env_list): """ Given a list of CudaEnviorments, pretty print them """ num_workers = len(cuda_env_list) center = "CUDA enviroments for all {} workers".format(num_workers) banner_len = 40 - len(center) // 2 first_line = "*" * banner_len + center + "*" * banner_len logger.info(first_line) for r, env in enumerate(cuda_env_list): logger.info( "rank {:3d}: ".format(r) + "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor) + "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB) + "name = {:40s}".format(env.name) ) logger.info(first_line) def csv_str_list(x): return x.split(",") def eval_str_list(x, type=float): if x is None: return None if isinstance(x, str): x = eval(x) try: return list(map(type, x)) except TypeError: return [type(x)] def eval_str_dict(x, type=dict): if x is None: return None if isinstance(x, str): x = eval(x) return x def eval_bool(x, default=False): if x is None: return default try: return bool(eval(x)) except TypeError: return default def reset_logging(): root = logging.getLogger() for handler in root.handlers: root.removeHandler(handler) root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper()) handler = logging.StreamHandler(sys.stdout) handler.setFormatter( logging.Formatter( fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) ) root.addHandler(handler) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def safe_hasattr(obj, k): """Returns True if the given key exists and is not None.""" return getattr(obj, k, None) is not None def hotreload_function(name=None): """ Decorator to function to enable hot-reload for debugging. It allows you to debug a function without having reloading all heavy models, dataset loading and preprocessing, allow faster debugging. 
If you want to change model or dataset loading, consider relaunching your code ----------------------------------- This will run the decorated function func: if func run successful: It will pause, allow user to edit code, and prompt user to: Press enter to re-run the function with updated code Type "done" to finish the function, return output Type "disable" to stop pausing this function and let code continue without pause Ctril + C to terminal if func raise error: it will prompt user to 1. Edit code, and press enter to retry 2. Ctrl + C to terminate 3. Type "raise" to raise that exception * Requirements: 0. Fairseq was installed with `pip install --editable .` 1. pip install jurigged[develoop] 2. set environment HOTRELOAD_PAUSE=1 CUDA_LAUNCH_BLOCKING=1 3. Run on only 1 GPU (no distributed) * How to use: 1. in python, import and decorate the top-level function to be re-run after code edits: ```python from fairseq.utils import hotreload_function .... @hotreload_function("train_step") def train_step(self, sample ....): .... .... ``` 2. in bash run scripts: ```bash watch_dir=<home>/fairseq-py/fairseq/tasks # directory to watch for file changes export CUDA_VISIBLE_DEVICES=0 # single-gpu HOTRELOAD_PAUSE=1 CUDA_LAUNCH_BLOCKING=1 python -m jurigged -w ${watch_dir} --poll 2 -v train.py ...... ``` * NOTE: 1. -w ${watch_dir} specify all the files to be watched for changes once functions, class, ... code are changed, all instances in the process will get updated (hot-reload) * Limitation: * Currently distributed debugging not working * Need to launch train.py locally (cannot submit jobs) """ try: import jurigged except ImportError as e: logger.warning("Please install jurigged: pip install jurigged[develoop]") raise e from fairseq.distributed import utils as distributed_utils import traceback def hotreload_decorator(func): assert callable(func), f"not callable: {func}" jname = name or func.__name__ logger.info(f"jurigged-hotreload:Apply jurigged on {jname}:{func.__name__}") HOTRELOAD_PAUSE = bool(os.environ.get("HOTRELOAD_PAUSE", 0)) cublk = bool(os.environ.get("CUDA_LAUNCH_BLOCKING", 0)) prefix = f"HOTRELOAD:{jname}:[cublk={cublk}]" hot_reload_state = {"disable": False} def func_wrapper(*args, **kwargs): if not HOTRELOAD_PAUSE or hot_reload_state["disable"]: return func(*args, **kwargs) world_size = distributed_utils.get_global_world_size() assert ( world_size <= 1 ), f"HOTRELOAD_PAUSE:{jname} currently cannot do distributed training" success = False while not success: try: output = func(*args, **kwargs) # success = True end_action = input( f"{prefix}: PAUSE, you may edit code now. Enter to re-run, ctrl+C to terminate, " f'type "done" to continue (function still being watched), or type "disable" to stop pausing this function :' ) if end_action.strip().lower() in ["disable", "done"]: success = True else: logger.warning( f"{prefix}: action={end_action} function will re-run now." ) except Exception as e: action = input( f"{prefix}:ERROR: \n{traceback.format_exc()}\n" f'Edit code to try again: enter to continue, ctrl+C to terminate, or type "raise" to raise the exception: ' ) if action.strip().lower() == "raise": raise e if end_action.strip().lower() == "disable": logger.warning( f"{prefix}: Stop pausing {jname}. The function is still being watched and newly editted code will take effect " f"if the {jname} is called again later." f' "unset HOTRELOAD_PAUSE" before relaunch to disable hotreload and' f" remove @hotreload_function decorator in the code." 
) hot_reload_state["disable"] = True return output return func_wrapper return hotreload_decorator
EXA-1-master
exa/libraries/fairseq/fairseq/utils.py
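A small illustration (toy tensors) of two helpers defined above: make_positions numbers non-pad tokens starting at padding_idx + 1 and leaves pads at padding_idx, and strip_pad drops pad symbols from a 1-D tensor:

import torch

from fairseq import utils

padding_idx = 1
tokens = torch.tensor([[5, 6, 7, 1, 1],
                       [8, 9, 1, 1, 1]])

print(utils.make_positions(tokens, padding_idx))
# tensor([[2, 3, 4, 1, 1],
#         [2, 3, 1, 1, 1]])

print(utils.strip_pad(tokens[0], padding_idx))  # tensor([5, 6, 7])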
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import collections import contextlib import inspect import logging import os import re import time import traceback from collections import OrderedDict from pathlib import Path from typing import Any, Dict, Optional, Union import numpy as np import torch from fairseq.data import data_utils from fairseq.dataclass.configs import CheckpointConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, OmegaConf, open_dict logger = logging.getLogger(__name__) def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss): from fairseq import meters # only one worker should attempt to create the required dir if trainer.data_parallel_rank == 0: os.makedirs(cfg.save_dir, exist_ok=True) prev_best = getattr(save_checkpoint, "best", val_loss) if val_loss is not None: best_function = max if cfg.maximize_best_checkpoint_metric else min save_checkpoint.best = best_function(val_loss, prev_best) if cfg.no_save: return None trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state if not trainer.should_save_checkpoint_on_current_rank: if trainer.always_call_state_dict_during_save_checkpoint: trainer.state_dict() return None write_timer = meters.StopwatchMeter() write_timer.start() epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() updates = trainer.get_num_updates() logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates") def is_better(a, b): return a >= b if cfg.maximize_best_checkpoint_metric else a <= b suffix = trainer.checkpoint_suffix checkpoint_conds = collections.OrderedDict() checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0 ) checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( not end_of_epoch and cfg.save_interval_updates > 0 and updates % cfg.save_interval_updates == 0 ) checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) if val_loss is not None and cfg.keep_best_checkpoints > 0: worst_best = getattr(save_checkpoint, "best", None) chkpts = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( cfg.best_checkpoint_metric, suffix ), ) if len(chkpts) > 0: p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0] worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), "")) # add random digits to resolve ties with data_utils.numpy_seed(epoch, updates, val_loss): rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints) checkpoint_conds[ "checkpoint.best_{}_{:.3f}{}{}.pt".format( cfg.best_checkpoint_metric, val_loss, rand_sfx, suffix ) ] = worst_best is None or is_better(val_loss, worst_best) checkpoint_conds[ "checkpoint_last{}.pt".format(suffix) ] = not cfg.no_last_checkpoints extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} if hasattr(save_checkpoint, "best"): extra_state.update({"best": save_checkpoint.best}) checkpoints = [ os.path.join(cfg.save_dir, fn) for fn, cond in 
checkpoint_conds.items() if cond ] saved_cp = None if len(checkpoints) > 0 and trainer.should_save_checkpoint_on_current_rank: saved_cp = trainer.save_checkpoint(checkpoints[0], extra_state) for cp in checkpoints[1:]: if cfg.write_checkpoints_asynchronously: # TODO[ioPath]: Need to implement a delayed asynchronous # file copying/moving feature. logger.warning( f"ioPath is not copying {checkpoints[0]} to {cp} " "since async write mode is on." ) else: assert PathManager.copy( checkpoints[0], cp, overwrite=True ), f"Failed to copy {checkpoints[0]} to {cp}" write_timer.stop() logger.info( "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( checkpoints[0], epoch, updates, val_loss, write_timer.sum ) ) if ( not end_of_epoch and cfg.keep_interval_updates > 0 and trainer.should_save_checkpoint_on_current_rank ): # remove old checkpoints; checkpoints are sorted in descending order if cfg.keep_interval_updates_pattern == -1: checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix) ) else: checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix), keep_match=True, ) checkpoints = [ x[0] for x in checkpoints if x[1] % cfg.keep_interval_updates_pattern != 0 ] for old_chk in checkpoints[cfg.keep_interval_updates :]: if os.path.lexists(old_chk): os.remove(old_chk) elif PathManager.exists(old_chk): PathManager.rm(old_chk) if cfg.keep_last_epochs > 0 and trainer.should_save_checkpoint_on_current_rank: # remove old epoch checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix) ) for old_chk in checkpoints[cfg.keep_last_epochs :]: if os.path.lexists(old_chk): os.remove(old_chk) elif PathManager.exists(old_chk): PathManager.rm(old_chk) if cfg.keep_best_checkpoints > 0 and trainer.should_save_checkpoint_on_current_rank: # only keep the best N checkpoints according to validation metric checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( cfg.best_checkpoint_metric, suffix ), ) if not cfg.maximize_best_checkpoint_metric: checkpoints = checkpoints[::-1] for old_chk in checkpoints[cfg.keep_best_checkpoints :]: if os.path.lexists(old_chk): os.remove(old_chk) elif PathManager.exists(old_chk): PathManager.rm(old_chk) return saved_cp def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): """ Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. 
""" reset_optimizer = cfg.reset_optimizer reset_lr_scheduler = cfg.reset_lr_scheduler optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) reset_meters = cfg.reset_meters reset_dataloader = cfg.reset_dataloader if cfg.finetune_from_model is not None and ( reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader ): raise ValueError( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" ) suffix = trainer.checkpoint_suffix if ( cfg.restore_file == "checkpoint_last.pt" ): # default value of restore_file is 'checkpoint_last.pt' checkpoint_path = os.path.join( cfg.save_dir, "checkpoint_last{}.pt".format(suffix) ) first_launch = not PathManager.exists(checkpoint_path) if first_launch and getattr(cfg, "continue_once", None) is not None: checkpoint_path = cfg.continue_once elif cfg.finetune_from_model is not None and first_launch: # if there is no last checkpoint to restore, start the finetune from pretrained model # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. if PathManager.exists(cfg.finetune_from_model): checkpoint_path = cfg.finetune_from_model reset_optimizer = True reset_lr_scheduler = True reset_meters = True reset_dataloader = True logger.info( f"loading pretrained model from {checkpoint_path}: " "optimizer, lr scheduler, meters, dataloader will be reset" ) else: raise ValueError( f"--finetune-from-model {cfg.finetune_from_model} does not exist" ) elif suffix is not None: checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt") else: checkpoint_path = cfg.restore_file if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model: raise ValueError( "--finetune-from-model and --restore-file (non-default value) " "can not be specified together: " + str(cfg) ) extra_state = trainer.load_checkpoint( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, reset_meters=reset_meters, ) if ( extra_state is not None and "best" in extra_state and not reset_optimizer and not reset_meters ): save_checkpoint.best = extra_state["best"] if extra_state is not None and not reset_dataloader: # restore iterator from checkpoint itr_state = extra_state["train_iterator"] epoch_itr = trainer.get_train_iterator( epoch=itr_state["epoch"], load_dataset=True, **passthrough_args ) epoch_itr.load_state_dict(itr_state) else: epoch_itr = trainer.get_train_iterator( epoch=1, load_dataset=True, **passthrough_args ) trainer.lr_step(epoch_itr.epoch) return extra_state, epoch_itr def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False): """Loads a checkpoint to CPU (with upgrading for backward compatibility). If doing single-GPU training or if the checkpoint is only being loaded by at most one process on each node (current default behavior is for only rank 0 to read the checkpoint from disk), load_on_all_ranks should be False to avoid errors from torch.distributed not having been initialized or torch.distributed.barrier() hanging. If all processes on each node may be loading the checkpoint simultaneously, load_on_all_ranks should be set to True to avoid I/O conflicts. There's currently no support for > 1 but < all processes loading the checkpoint on each node. 
""" local_path = PathManager.get_local_path(path) # The locally cached file returned by get_local_path() may be stale for # remote files that are periodically updated/overwritten (ex: # checkpoint_last.pt) - so we remove the local copy, sync across processes # (if needed), and then download a fresh copy. if local_path != path and PathManager.path_requires_pathmanager(path): try: os.remove(local_path) except FileNotFoundError: # With potentially multiple processes removing the same file, the # file being missing is benign (missing_ok isn't available until # Python 3.8). pass if load_on_all_ranks: torch.distributed.barrier() local_path = PathManager.get_local_path(path) with open(local_path, "rb") as f: state = torch.load(f, map_location=torch.device("cpu")) if "args" in state and state["args"] is not None and arg_overrides is not None: args = state["args"] for arg_name, arg_val in arg_overrides.items(): setattr(args, arg_name, arg_val) if "cfg" in state and state["cfg"] is not None: # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import __version__ as oc_version from omegaconf import _utils if oc_version < "2.2": old_primitive = _utils.is_primitive_type _utils.is_primitive_type = lambda _: True state["cfg"] = OmegaConf.create(state["cfg"]) _utils.is_primitive_type = old_primitive OmegaConf.set_struct(state["cfg"], True) else: state["cfg"] = OmegaConf.create(state["cfg"], flags={"allow_objects": True}) if arg_overrides is not None: overwrite_args_by_name(state["cfg"], arg_overrides) state = _upgrade_state_dict(state) return state def load_model_ensemble( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, ): """Loads an ensemble of models. 
Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading """ assert not ( strict and num_shards > 1 ), "Cannot load state dict with strict=True and checkpoint shards > 1" ensemble, args, _task = load_model_ensemble_and_task( filenames, arg_overrides, task, strict, suffix, num_shards, state, ) return ensemble, args def get_maybe_sharded_checkpoint_filename( filename: str, suffix: str, shard_idx: int, num_shards: int ) -> str: orig_filename = filename filename = filename.replace(".pt", suffix + ".pt") fsdp_filename = filename[:-3] + f"-shard{shard_idx}.pt" model_parallel_filename = orig_filename[:-3] + f"_part{shard_idx}.pt" if PathManager.exists(fsdp_filename): return fsdp_filename elif num_shards > 1: return model_parallel_filename else: return filename def load_model_ensemble_and_task( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, ): assert state is None or len(filenames) == 1 from fairseq import tasks assert not ( strict and num_shards > 1 ), "Cannot load state dict with strict=True and checkpoint shards > 1" ensemble = [] cfg = None for filename in filenames: orig_filename = filename model_shard_state = {"shard_weights": [], "shard_metadata": []} assert num_shards > 0 st = time.time() for shard_idx in range(num_shards): filename = get_maybe_sharded_checkpoint_filename( orig_filename, suffix, shard_idx, num_shards ) if not PathManager.exists(filename): raise IOError("Model file not found: {}".format(filename)) if state is None: state = load_checkpoint_to_cpu(filename, arg_overrides) if "args" in state and state["args"] is not None: cfg = convert_namespace_to_omegaconf(state["args"]) elif "cfg" in state and state["cfg"] is not None: cfg = state["cfg"] else: raise RuntimeError( f"Neither args nor cfg exist in state keys = {state.keys()}" ) if task is None: task = tasks.setup_task(cfg.task, from_checkpoint=True) if "task_state" in state: task.load_state_dict(state["task_state"]) argspec = inspect.getfullargspec(task.build_model) if "fsdp_metadata" in state and num_shards > 1: model_shard_state["shard_weights"].append(state["model"]) model_shard_state["shard_metadata"].append(state["fsdp_metadata"]) # check FSDP import before the code goes too far if not has_FSDP: raise ImportError( "Cannot find FullyShardedDataParallel. 
" "Please install fairscale with: pip install fairscale" ) if shard_idx == num_shards - 1: consolidated_model_state = FSDP.consolidate_shard_weights( shard_weights=model_shard_state["shard_weights"], shard_metadata=model_shard_state["shard_metadata"], ) if "from_checkpoint" in argspec.args: model = task.build_model(cfg.model, from_checkpoint=True) else: model = task.build_model(cfg.model) if ( "optimizer_history" in state and len(state["optimizer_history"]) > 0 and "num_updates" in state["optimizer_history"][-1] ): model.set_num_updates( state["optimizer_history"][-1]["num_updates"] ) model.load_state_dict( consolidated_model_state, strict=strict, model_cfg=cfg.model ) else: # model parallel checkpoint or unsharded checkpoint # support old external tasks if "from_checkpoint" in argspec.args: model = task.build_model(cfg.model, from_checkpoint=True) else: model = task.build_model(cfg.model) if ( "optimizer_history" in state and len(state["optimizer_history"]) > 0 and "num_updates" in state["optimizer_history"][-1] ): model.set_num_updates(state["optimizer_history"][-1]["num_updates"]) model.load_state_dict( state["model"], strict=strict, model_cfg=cfg.model ) # reset state so it gets loaded for the next model in ensemble state = None if shard_idx % 10 == 0 and shard_idx > 0: elapsed = time.time() - st logger.info( f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard" ) # build model for ensemble ensemble.append(model) return ensemble, cfg, task def load_model_ensemble_and_task_from_hf_hub( model_id, cache_dir: Optional[str] = None, arg_overrides: Optional[Dict[str, Any]] = None, **kwargs: Any, ): try: from huggingface_hub import snapshot_download except ImportError: raise ImportError( "You need to install huggingface_hub to use `load_from_hf_hub`. " "See https://pypi.org/project/huggingface-hub/ for installation." ) library_name = "fairseq" cache_dir = cache_dir or (Path.home() / ".cache" / library_name).as_posix() cache_dir = snapshot_download( model_id, cache_dir=cache_dir, library_name=library_name, **kwargs ) _arg_overrides = arg_overrides or {} _arg_overrides["data"] = cache_dir return load_model_ensemble_and_task( [p.as_posix() for p in Path(cache_dir).glob("*.pt")], arg_overrides=_arg_overrides, ) def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt", keep_match=False): """Retrieves all checkpoints found in `path` directory. Checkpoints are identified by matching filename to the specified pattern. If the pattern contains groups, the result will be sorted by the first group in descending order. 
""" pt_regexp = re.compile(pattern) files = PathManager.ls(path) entries = [] for i, f in enumerate(files): m = pt_regexp.fullmatch(f) if m is not None: idx = float(m.group(1)) if len(m.groups()) > 0 else i entries.append((idx, m.group(0))) if keep_match: return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)] else: return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] def torch_persistent_save(obj, filename, async_write: bool = False): if async_write: with PathManager.opena(filename, "wb") as f: _torch_persistent_save(obj, f) else: if PathManager.supports_rename(filename): # do atomic save with PathManager.open(filename + ".tmp", "wb") as f: _torch_persistent_save(obj, f) PathManager.rename(filename + ".tmp", filename) else: # fallback to non-atomic save with PathManager.open(filename, "wb") as f: _torch_persistent_save(obj, f) def _torch_persistent_save(obj, f): if isinstance(f, str): with PathManager.open(f, "wb") as h: torch_persistent_save(obj, h) return for i in range(3): try: return torch.save(obj, f) except Exception: if i == 2: logger.error(traceback.format_exc()) raise else: time.sleep(2.5) def _upgrade_state_dict(state): """Helper for upgrading old model checkpoints.""" # add optimizer_history if "optimizer_history" not in state: state["optimizer_history"] = [ {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]} ] state["last_optimizer_state"] = state["optimizer"] del state["optimizer"] del state["best_loss"] # move extra_state into sub-dictionary if "epoch" in state and "extra_state" not in state: state["extra_state"] = { "epoch": state["epoch"], "batch_offset": state["batch_offset"], "val_loss": state["val_loss"], } del state["epoch"] del state["batch_offset"] del state["val_loss"] # reduce optimizer history's memory usage (only keep the last state) if "optimizer" in state["optimizer_history"][-1]: state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"] for optim_hist in state["optimizer_history"]: del optim_hist["optimizer"] # record the optimizer class name if "optimizer_name" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG" # move best_loss into lr_scheduler_state if "lr_scheduler_state" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["lr_scheduler_state"] = { "best": state["optimizer_history"][-1]["best_loss"] } del state["optimizer_history"][-1]["best_loss"] # keep track of number of updates if "num_updates" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["num_updates"] = 0 # use stateful training data iterator if "train_iterator" not in state["extra_state"]: state["extra_state"]["train_iterator"] = { "epoch": state["extra_state"].get("epoch", 0), "iterations_in_epoch": state["extra_state"].get("batch_offset", 0), } # backward compatibility, cfg updates if "args" in state and state["args"] is not None: # old model checkpoints may not have separate source/target positions if hasattr(state["args"], "max_positions") and not hasattr( state["args"], "max_source_positions" ): state["args"].max_source_positions = state["args"].max_positions state["args"].max_target_positions = state["args"].max_positions # default to translation task if not hasattr(state["args"], "task"): state["args"].task = "translation" # --raw-text and --lazy-load are deprecated if getattr(state["args"], "raw_text", False): state["args"].dataset_impl = "raw" elif getattr(state["args"], "lazy_load", False): 
state["args"].dataset_impl = "lazy" # epochs start at 1 if state["extra_state"]["train_iterator"] is not None: state["extra_state"]["train_iterator"]["epoch"] = max( state["extra_state"]["train_iterator"].get("epoch", 1), 1 ) # --remove-bpe ==> --postprocess if hasattr(state["args"], "remove_bpe"): state["args"].post_process = state["args"].remove_bpe # --min-lr ==> --stop-min-lr if hasattr(state["args"], "min_lr"): state["args"].stop_min_lr = state["args"].min_lr del state["args"].min_lr # binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion if hasattr(state["args"], "criterion") and state["args"].criterion in [ "binary_cross_entropy", "kd_binary_cross_entropy", ]: state["args"].criterion = "wav2vec" # remove log_keys if it's None (criteria will supply a default value of []) if hasattr(state["args"], "log_keys") and state["args"].log_keys is None: delattr(state["args"], "log_keys") # speech_pretraining => audio pretraining if ( hasattr(state["args"], "task") and state["args"].task == "speech_pretraining" ): state["args"].task = "audio_pretraining" # audio_cpc => wav2vec if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc": state["args"].arch = "wav2vec" # convert legacy float learning rate to List[float] if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float): state["args"].lr = [state["args"].lr] # convert task data arg to a string instead of List[string] if ( hasattr(state["args"], "data") and isinstance(state["args"].data, list) and len(state["args"].data) > 0 ): state["args"].data = state["args"].data[0] state["cfg"] = convert_namespace_to_omegaconf(state["args"]) if "cfg" in state and state["cfg"] is not None: cfg = state["cfg"] with open_dict(cfg): # any upgrades for Hydra-based configs if ( "task" in cfg and "eval_wer_config" in cfg.task and isinstance(cfg.task.eval_wer_config.print_alignment, bool) ): cfg.task.eval_wer_config.print_alignment = "hard" if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool): cfg.generation.print_alignment = ( "hard" if cfg.generation.print_alignment else None ) if ( "model" in cfg and "w2v_args" in cfg.model and cfg.model.w2v_args is not None and ( hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args ) and hasattr(cfg.model.w2v_args.task, "eval_wer_config") and cfg.model.w2v_args.task.eval_wer_config is not None and isinstance( cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool ) ): cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard" return state def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]): """Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly. """ arch = None if model_cfg is not None: arch = ( model_cfg._name if isinstance(model_cfg, DictConfig) else getattr(model_cfg, "arch", None) ) if not model_cfg or arch is None or arch == "ptt_transformer": # args should not be none, but don't crash if it is. 
return state_dict encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None) decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None) if not encoder_layers_to_keep and not decoder_layers_to_keep: return state_dict # apply pruning logger.info( "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" ) def create_pruning_pass(layers_to_keep, layer_name): keep_layers = sorted( int(layer_string) for layer_string in layers_to_keep.split(",") ) mapping_dict = {} for i in range(len(keep_layers)): mapping_dict[str(keep_layers[i])] = str(i) regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) return {"substitution_regex": regex, "mapping_dict": mapping_dict} pruning_passes = [] if encoder_layers_to_keep: pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) if decoder_layers_to_keep: pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) new_state_dict = {} for layer_name in state_dict.keys(): match = re.search(r"\.layers\.(\d+)\.", layer_name) # if layer has no number in it, it is a supporting layer, such as an # embedding if not match: new_state_dict[layer_name] = state_dict[layer_name] continue # otherwise, layer should be pruned. original_layer_number = match.group(1) # figure out which mapping dict to replace from for pruning_pass in pruning_passes: if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[ "substitution_regex" ].search(layer_name): new_layer_number = pruning_pass["mapping_dict"][original_layer_number] substitution_match = pruning_pass["substitution_regex"].search( layer_name ) new_state_key = ( layer_name[: substitution_match.start(1)] + new_layer_number + layer_name[substitution_match.end(1) :] ) new_state_dict[new_state_key] = state_dict[layer_name] # Since layers are now pruned, *_layers_to_keep are no longer needed. # This is more of "It would make it work fix" rather than a proper fix. if isinstance(model_cfg, DictConfig): context = open_dict(model_cfg) else: context = contextlib.ExitStack() with context: if hasattr(model_cfg, "encoder_layers_to_keep"): model_cfg.encoder_layers_to_keep = None if hasattr(model_cfg, "decoder_layers_to_keep"): model_cfg.decoder_layers_to_keep = None return new_state_dict def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str, strict: bool = True, ): """ Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. """ if not PathManager.exists(checkpoint): raise IOError("Model file not found: {}".format(checkpoint)) state = load_checkpoint_to_cpu(checkpoint) if isinstance(component, FairseqEncoder): component_type = "encoder" elif isinstance(component, FairseqDecoder): component_type = "decoder" else: raise ValueError( "component to load must be either a FairseqEncoder or " "FairseqDecoder. Loading other component types are not supported." 
) component_state_dict = OrderedDict() for key in state["model"].keys(): if key.startswith(component_type): # encoder.input_layers.0.0.weight --> input_layers.0.0.weight component_subkey = key[len(component_type) + 1 :] component_state_dict[component_subkey] = state["model"][key] component.load_state_dict(component_state_dict, strict=strict) return component def verify_checkpoint_directory(save_dir: str) -> None: if not os.path.exists(save_dir): os.makedirs(save_dir, exist_ok=True) temp_file_path = os.path.join(save_dir, "dummy") try: with open(temp_file_path, "w"): pass except OSError as e: logger.warning( "Unable to access checkpoint save directory: {}".format(save_dir) ) raise e else: os.remove(temp_file_path) def save_ema_as_checkpoint(src_path, dst_path): state = load_ema_from_checkpoint(src_path) torch_persistent_save(state, dst_path) def load_ema_from_checkpoint(fpath): """Loads exponential moving averaged (EMA) checkpoint from input and returns a model with ema weights. Args: fpath: A string path of checkpoint to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() new_state = None with PathManager.open(fpath, "rb") as f: new_state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, "cpu") ), ) # EMA model is stored in a separate "extra state" model_params = new_state["extra_state"]["ema"] for key in list(model_params.keys()): p = model_params[key] if isinstance(p, torch.HalfTensor): p = p.float() if key not in params_dict: params_dict[key] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: raise ValueError("Key {} is repeated in EMA model params.".format(key)) if len(params_dict) == 0: raise ValueError( f"Input checkpoint path '{fpath}' does not contain " "ema model weights, is this model trained with EMA?" ) new_state["model"] = params_dict return new_state
EXA-1-master
exa/libraries/fairseq/fairseq/checkpoint_utils.py
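The checkpoint helpers in the file above are normally driven from two entry points: load_checkpoint_to_cpu for inspecting or patching a raw state dict, and load_model_ensemble_and_task for rebuilding the models and their task. The following is a minimal usage sketch, not part of the file itself; the checkpoint filename and the "data" override are placeholder assumptions.

from fairseq import checkpoint_utils

# Load the raw state dict on CPU (backward-compatibility upgrades are applied).
state = checkpoint_utils.load_checkpoint_to_cpu("checkpoint_best.pt")  # placeholder path
print(sorted(state.keys()))  # typically includes 'cfg' (or 'args'), 'model', 'extra_state', ...

# Rebuild the model ensemble and the task it was trained with.
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    ["checkpoint_best.pt"],                       # placeholder path
    arg_overrides={"data": "/path/to/data-bin"},  # placeholder data directory override
)
model = models[0]
model.eval()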
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from fairseq.modules.quantization import pq, quantization_options, scalar from omegaconf import DictConfig logger = logging.getLogger(__name__) def quantize_model_scalar(model, model_cfg: DictConfig): quant_noise_scalar = getattr(model_cfg, "quant_noise_scalar", 0) or 0 if quant_noise_scalar > 0: # quantize_model edits the model in place scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000) return model class Quantizer(object): def __init__(self, config_path, max_epoch, max_update): try: import yaml except ImportError: raise ImportError("Please install yaml with: pip install yaml") # parse config if config_path: with open(config_path) as config_file: config = quantization_options.parse_config_yaml( yaml.safe_load(config_file) ) else: config = quantization_options.parse_config_yaml({}) self.n_centroids_config = config["n_centroids"] self.block_sizes_config = config["block_sizes"] self.layers_to_quantize = config["layers_to_quantize"] # We assume that training will run for a fixed number of epochs # (or updates) and that we should train for equal durations # between iterations of PQ. num_iterations = len(self.layers_to_quantize) if max_epoch > 0: assert max_epoch % num_iterations == 0, ( "for iterative PQ, --max-epoch (={}) must be evenly divisible by " "len(layers_to_quantize) (={})".format(max_epoch, num_iterations) ) self.epoch_schedule = max_epoch // num_iterations else: self.epoch_schedule = None if max_update > 0: assert max_update % num_iterations == 0, ( "for iterative PQ, --max-update (={}) must be evenly divisible by " "len(layers_to_quantize) (={})".format(max_update, num_iterations) ) self.update_schedule = max_update // num_iterations else: self.update_schedule = None assert (self.epoch_schedule is not None) ^ ( self.update_schedule is not None ), "for iterative PQ, cannot specify both --max-update and --max-epoch" # 0 is a special value for quantization step, which will force # the first call to begin_epoch() to call step() self.quantization_step = 0 def set_trainer(self, trainer): self.trainer = trainer self.size_tracker = pq.SizeTracker(self.trainer.get_model()) def step(self): """Move to the next stage of quantization.""" if self.quantization_step >= len(self.layers_to_quantize): # Maybe we just finished the last training step or we loaded # a checkpoint for an iterative PQ model which previously # finished training. Either way, don't quantize again. 
return logger.info( "quantizing model (step={}; layers_to_quantize[step]={})".format( self.quantization_step, self.layers_to_quantize[self.quantization_step] ) ) quantized_layers = pq.quantize_model_( self.trainer.get_model(), self.size_tracker, self.layers_to_quantize, self.block_sizes_config, self.n_centroids_config, step=self.quantization_step, ) logger.info("quantized layers: {}".format(quantized_layers)) logger.info(self.size_tracker) self.quantization_step += 1 # reintialize the Trainer since model parameters have changed self.trainer.reinitialize() def begin_epoch(self, epoch): """Called at the beginning of each epoch (epochs start at 1).""" if ( ( self.epoch_schedule is not None and epoch > 0 and (epoch - 1) % self.epoch_schedule == 0 ) # we always step once in the beginning, even if using # update-based quantization or self.quantization_step == 0 ): self.step() def step_update(self, num_updates): """Called at the end of each step.""" if ( self.update_schedule is not None and num_updates > 0 and num_updates % self.update_schedule == 0 ): self.step() def state_dict(self): return { "n_centroids_config": self.n_centroids_config, "block_sizes_config": self.block_sizes_config, "layers_to_quantize": self.layers_to_quantize, "epoch_schedule": self.epoch_schedule, "update_schedule": self.update_schedule, "quantization_step": self.quantization_step, } def load_state_dict(self, state_dict): self.n_centroids_config = state_dict["n_centroids_config"] self.block_sizes_config = state_dict["block_sizes_config"] self.layers_to_quantize = state_dict["layers_to_quantize"] self.epoch_schedule = state_dict["epoch_schedule"] self.update_schedule = state_dict["update_schedule"] self.quantization_step = state_dict["quantization_step"]
EXA-1-master
exa/libraries/fairseq/fairseq/quantization_utils.py
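As a rough sketch of how the Quantizer above is meant to be scheduled (illustrative only, not taken from the file): training length must divide evenly by the number of entries in layers_to_quantize, so that one product-quantization iteration runs per equal slice of training. The YAML path and the numbers below are assumptions.

from fairseq.quantization_utils import Quantizer

# Assume quantization.yaml (placeholder) defines 3 groups under layers_to_quantize.
# With --max-epoch 30 the epoch schedule is 30 // 3 = 10, i.e. one PQ step every
# 10 epochs; exactly one of max_epoch / max_update may be non-zero.
quantizer = Quantizer(config_path="quantization.yaml", max_epoch=30, max_update=0)

# quantizer.set_trainer(trainer)  # a fairseq Trainer instance (not constructed here)
# quantizer.begin_epoch(1)        # first call triggers step(), since quantization_step == 0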
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Utilities for working with the local dataset cache. This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_. and `huggingface <https://github.com/huggingface>`_. """ import fnmatch import json import logging import os import shutil import tarfile import tempfile from functools import partial, wraps from hashlib import sha256 from io import open try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv( "TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch") ) ) default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq") try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)) except (AttributeError, ImportError): PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path) CONFIG_NAME = "config.json" WEIGHTS_NAME = "pytorch_model.bin" logger = logging.getLogger(__name__) # pylint: disable=invalid-name def load_archive_file(archive_file): # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=None) except EnvironmentError: logger.info( "Archive name '{}' was not found in archive name list. " "We assumed '{}' was a path or URL but couldn't find any file " "associated to this path or URL.".format( archive_file, archive_file, ) ) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info( "loading archive file {} from cache at {}".format( archive_file, resolved_archive_file ) ) # Extract archive to temp dir and replace .tar.bz2 if necessary tempdir = None if not os.path.isdir(resolved_archive_file): tempdir = tempfile.mkdtemp() logger.info( "extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir ) ) ext = os.path.splitext(archive_file)[1][1:] with tarfile.open(resolved_archive_file, "r:" + ext) as archive: top_dir = os.path.commonprefix(archive.getnames()) archive.extractall(tempdir) os.remove(resolved_archive_file) shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file) shutil.rmtree(tempdir) return resolved_archive_file def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the URL's, delimited by a period. """ url_bytes = url.encode("utf-8") url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. 
""" if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def cached_path_from_pm(url_or_filename): """ Tries to cache the specified URL using PathManager class. Returns the cached path if success otherwise failure. """ try: from fairseq.file_io import PathManager local_path = PathManager.get_local_path(url_or_filename) return local_path except Exception: return None def cached_path(url_or_filename, cache_dir=None): """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ("http", "https", "s3"): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: cached_path = cached_path_from_pm(url_or_filename) if cached_path: return cached_path # Something unknown raise ValueError( "unable to parse {} as a URL or as a local path".format(url_or_filename) ) def split_s3_path(url): """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url, *args, **kwargs): from botocore.exceptions import ClientError try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise EnvironmentError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url): """Check ETag on S3 object.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url, temp_file): """Pull a file directly from S3.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def request_wrap_timeout(func, url): import requests for attempt, timeout in enumerate([10, 20, 40, 60, 60]): try: return func(timeout=timeout) except requests.exceptions.Timeout as e: logger.warning( "Request for %s timed-out (attempt %d). 
Retrying with a timeout of %d secs", url, attempt, timeout, exc_info=e, ) continue raise RuntimeError(f"Unable to fetch file {url}") def http_get(url, temp_file): import requests from tqdm import tqdm req = request_wrap_timeout(partial(requests.get, url, stream=True), url) content_length = req.headers.get("Content-Length") total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url, cache_dir=None): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: try: import requests response = request_wrap_timeout( partial(requests.head, url, allow_redirects=True), url ) if response.status_code != 200: etag = None else: etag = response.headers.get("ETag") except RuntimeError: etag = None filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # If we don't have a connection (etag is None) and can't identify the file # try to get the last downloaded one if not os.path.exists(cache_path) and etag is None: matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*") matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files)) if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, "wb") as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: output_string = json.dumps(meta) meta_file.write(output_string) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename): """ Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. """ collection = set() with open(filename, "r", encoding="utf-8") as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path, dot=True, lower=True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext
EXA-1-master
exa/libraries/fairseq/fairseq/file_utils.py
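A small sketch of how the cache helpers above are typically called; the URL is a placeholder, and a download only happens when the file is not already present under the fairseq cache directory (PYTORCH_FAIRSEQ_CACHE, defaulting to the torch cache).

from fairseq import file_utils

# URLs (http/https/s3) are fetched once and cached under a name derived from
# sha256(url) plus sha256(etag) when available; existing local paths are
# returned unchanged after an existence check.
local_file = file_utils.cached_path("https://example.com/model.tar.gz")  # placeholder URL
print(local_file)

# Cached .tar.* archives can then be unpacked in place via load_archive_file:
# resolved = file_utils.load_archive_file("https://example.com/model.tar.gz")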
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import List, Optional import torch import torch.nn as nn from fairseq.token_generation_constraints import ( ConstraintState, OrderedConstraintState, UnorderedConstraintState, ) from torch import Tensor class Search(nn.Module): def __init__(self, tgt_dict): super().__init__() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.src_lengths = torch.tensor(-1) self.supports_constraints = False self.stop_on_max_len = False def step( self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None ): """Take a single search step. Args: step: the current search step, starting at 0 lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step scores: (bsz x input_beam_size x step) the historical model scores of each hypothesis up to this point prev_output_tokens: (bsz x step) the previously generated oputput tokens original_batch_idxs: (bsz) the tensor with the batch indices, in the range [0, bsz) this is useful in case there has been applied a re-ordering and we need to know the orignal indices Return: A tuple of (scores, indices, beams) where: scores: (bsz x output_beam_size) the scores of the chosen elements; output_beam_size can be larger than input_beam_size, e.g., we may return 2*input_beam_size to account for EOS indices: (bsz x output_beam_size) the indices of the chosen elements beams: (bsz x output_beam_size) the hypothesis ids of the chosen elements, in the range [0, input_beam_size) """ raise NotImplementedError @torch.jit.export def set_src_lengths(self, src_lengths): self.src_lengths = src_lengths @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): """Initialize constraint states for constrained decoding (if supported). Args: batch_constraints: (torch.Tensor, optional) the list of constraints, in packed form beam_size: (int) the beam size Returns: *encoder_out* rearranged according to *new_order* """ pass def prune_sentences(self, batch_idxs: Tensor): """ Removes constraint states for completed sentences (if supported). This is called from sequence_generator._generate() when sentences are deleted from the batch. Args: batch_idxs: Indices of *sentences* whose constraint state should be *kept*. """ pass def update_constraints(self, active_hypos: Tensor): """ Updates the constraint states by selecting the beam items that are retained. This is called at each time step of sequence_generator._generate() when the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size. Args: active_hypos: (batch size, beam size) list of integers denoting, for each sentence, which beam candidate items should be kept. 
""" pass class BeamSearch(Search): def __init__(self, tgt_dict): super().__init__(tgt_dict) self.constraint_states = None @torch.jit.export def step( self, step: int, lprobs, scores: Optional[Tensor], prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, candidate_multiple: int = 2, ): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(bsz, -1), k=min( # Take the best `candidate_muliple`(default 2) x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. candidate_multiple * beam_size, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), ) scores_buf = top_prediction[0] indices_buf = top_prediction[1] # Project back into relative indices and beams beams_buf = torch.div(indices_buf, vocab_size, rounding_mode="trunc") indices_buf = indices_buf.fmod(vocab_size) # At this point, beams_buf and indices_buf are single-dim and contain relative indices return scores_buf, indices_buf, beams_buf class PrefixConstrainedBeamSearch(Search): def __init__(self, tgt_dict, prefix_allowed_tokens_fn): super().__init__(tgt_dict) self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn self.stop_on_max_len = True @torch.jit.export def apply_mask(self, x, prev_output_tokens, original_batch_idxs): beam_size = x.shape[0] // original_batch_idxs.shape[0] original_batch_idxs = ( original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist() ) mask = torch.full_like(x, -math.inf) for sent_i, (sent, batch_i) in enumerate( zip(prev_output_tokens, original_batch_idxs) ): mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0 return mask @torch.jit.export def step( self, step: int, lprobs: Tensor, scores: Tensor, prev_output_tokens: Tensor, original_batch_idxs: Tensor, ): bsz, beam_size, vocab_size = lprobs.size() lprobs += self.apply_mask( lprobs.view(bsz * beam_size, 1, vocab_size), prev_output_tokens, original_batch_idxs, ).view(bsz, beam_size, vocab_size) if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(bsz, -1), k=min( # Take the best beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), ) scores_buf = top_prediction[0] indices_buf = top_prediction[1] beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) return scores_buf, indices_buf, beams_buf class LexicallyConstrainedBeamSearch(Search): """Implements lexically constrained beam search as described in Fast Lexically Constrained Decoding with Dynamic Beam Allocation for Neural Machine Translation. Post & Vilar, NAACL 2018. https://www.aclweb.org/anthology/N18-1119/ and Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting. Hu et al, NAACL 2019. 
https://www.aclweb.org/anthology/N19-1090/ This is accomplished by maintaining, for each beam hypothesis, a ConstraintState object (see constraints.py) that tracks which constraints have been generated and using this information to shape the beam for each input sentence. """ def __init__(self, tgt_dict, representation): super().__init__(tgt_dict) self.representation = representation self.vocab_size = len(tgt_dict) self.num_cands = 0 self.supports_constraints = True @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): self.constraint_states = [] for constraint_tensor in batch_constraints: if self.representation == "ordered": constraint_state = OrderedConstraintState.create(constraint_tensor) elif self.representation == "unordered": constraint_state = UnorderedConstraintState.create(constraint_tensor) self.constraint_states.append([constraint_state for i in range(beam_size)]) @torch.jit.export def prune_sentences(self, batch_idxs: Tensor): self.constraint_states = [ self.constraint_states[i] for i in batch_idxs.tolist() ] @torch.jit.export def update_constraints(self, active_hypos: Tensor): if self.constraint_states: batch_size = active_hypos.size(0) for sentid in range(batch_size): self.constraint_states[sentid] = [ self.constraint_states[sentid][i] for i in active_hypos[sentid] ] @torch.jit.export def step( self, step: int, lprobs: Tensor, scores: Optional[Tensor], prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): """ A constrained step builds a large candidates list from the following: - the top 2 * {beam_size} items over the whole beam - for each item in the beam - the top {each_k} (default 1) - all next constraints We then compute the constrained state of each beam item, and assign stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so on. We then sort by (stripe, score), and truncate the list at 2 * beam size. Args: step: the decoder step lprobs: (batch size, beam size, target vocab) the target-vocab distributions for each item in the beam. Retrun: A tuple of (scores, indices, beams, constraints) where: scores: (batch, output beam size) the scores of the chosen elements indices: (batch, output beam size) the target vocab indices of the chosen elements beams: (batch, output beam size) the 0-indexed hypothesis ids of the chosen elements constraints: (batch, output beam size) the new constraint states """ each_k = 1 device = lprobs.device batch_size, beam_size, vocab_size = lprobs.size() self.num_cands = min( # Just take the k-best. We'll get another k from the 1-best from each # row, plus more from the constraints beam_size * 2, lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad ) # STEP 0: Preliminary. 
Prevent EOS for unfinished hyps across all batch items constraint_states = self.constraint_states if constraint_states and step > 0: not_finished_indices = [] for sentno, sent_constraints in enumerate(constraint_states): for beamno, state in enumerate(sent_constraints): index = sentno * beam_size + beamno if not state.finished: not_finished_indices.append(index) not_finished_indices = torch.tensor(not_finished_indices) if not_finished_indices.numel() > 0: lprobs.view(batch_size * beam_size, -1)[ not_finished_indices, self.eos ] = -math.inf if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam entry for each batch item lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(batch_size, -1), self.num_cands, ) scores_buf, indices_buf = top_prediction # Project back into relative indices and beams beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) # Short circuit if there are no constraints in this batch if not constraint_states: return scores_buf, indices_buf, beams_buf # STEP 1: get top-1 from each hypothesis across all sentences in the batch if step > 0: top_scores, top_indices = torch.topk( lprobs.view(batch_size * beam_size, -1), k=each_k, dim=1, ) top_scores = top_scores.view(batch_size, -1) top_indices = top_indices.view(batch_size, -1) scores_buf = torch.cat((scores_buf, top_scores), dim=1) indices_buf = torch.cat((indices_buf, top_indices), dim=1) new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1) beams_buf = torch.cat((beams_buf, new_beams), dim=1) # Now, process sentences in the batch one by one. new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device) new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() for sentno, states in enumerate(constraint_states): scores, indices, beams, new_states = self.step_sentence( step, sentno, lprobs[sentno], constraint_states[sentno], beams_buf[sentno].clone(), indices_buf[sentno].clone(), scores_buf[sentno].clone(), ) new_scores_buf[sentno] = scores new_indices_buf[sentno] = indices new_beams_buf[sentno] = beams self.constraint_states[sentno] = new_states return new_scores_buf, new_indices_buf, new_beams_buf @torch.jit.export def step_sentence( self, step: int, sentno: int, lprobs: Tensor, constraint_states: List[List[ConstraintState]], beams_buf: Tensor, indices_buf: Tensor, scores_buf: Tensor, ): """Does per-sentence processing. Adds all constraints for each hypothesis to the list of candidates; then removes duplicates, sorts, and dynamically stripes across the banks. All tensor inputs are collapsed to those pertaining to a single input sentence. 
""" device = lprobs.device # STEP 2: Add all constraints for each beam item for beamno, state in enumerate(constraint_states): next_tokens = torch.tensor(list(state.next_tokens()), device=device).long() if next_tokens.numel() != 0: indices_buf = torch.cat((indices_buf, next_tokens)) next_beams = ( torch.tensor(beamno, device=device) .repeat(next_tokens.size(0)) .long() ) beams_buf = torch.cat((beams_buf, next_beams)) next_values = lprobs[beamno].take(next_tokens.view(-1)) scores_buf = torch.cat((scores_buf, next_values)) # At the 0th time step, there is just one beam item if step == 0: break # STEP 3: Compute the "bank" for each candidate. This is the # number of constraints it's generated. We need this so that # we can do round-robin allocation of the beam across these # banks. If C is the number of constraints, we select the best # item in bank C, then the best in bank C-1, etc, followed by # the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so # on, until the maximum beam size. We accomplish this by # creating a sort key and striping across the banks. # Compute the new states for all candidates cands_size = indices_buf.size(0) constraint_states = [ constraint_states[beams_buf[i]].advance(indices_buf[i]) for i in range(cands_size) ] banks = torch.tensor([state.bank for state in constraint_states], device=device) # STEP 4: Sort num_constraint_tokens = len(state.tokens) # Sort by keys (bank, score) (i.e., sort banks together, and scores # within banks). AFAIK pytorch doesn't support either stable sort or # multi-key sorting, so we have to hack this. MAX_SCORE = -100 sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf sort_values, sort_indices = sort_key.sort(dim=0, descending=True) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] banks = banks[sort_indices] # Sort the constraints to follow suit constraint_states = [constraint_states[i] for i in sort_indices] # STEP 5: Remove duplicates. The topk calls (overall and # per-row) plus the per-row generation of constraints will # produce duplicates. Here we remove them. def roll(t): """Rolls a 1d tensor left by 1. [0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3] """ return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0) # We map candidates (beam, token_id) to a single dimension. # This is then shifted by 1. We can then easily identify # duplicates and create a mask that identifies unique # extensions. uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf uniques_mask = roll(uniques_mask) != uniques_mask # Use the mask to pare down the data structures scores_buf = torch.masked_select(scores_buf, uniques_mask) indices_buf = torch.masked_select(indices_buf, uniques_mask) beams_buf = torch.masked_select(beams_buf, uniques_mask) banks = torch.masked_select(banks, uniques_mask) i = 1 for mask in uniques_mask[1:]: if not mask: constraint_states.pop(i) i += mask # STEP 6: Assign IDs round-robin across banks, sort, and # truncate. Now that the candidates are sorted by (bank, # score) and uniqed, we dynamically allocate the {beam_size} # beam by striping across the candidates. These stripes will # be used as sort keys to do round-robin selection. This is # accomplished in a single pass with offsets. Sorting by # highest-banks (furthest-along hypotheses) first ensures # progress through the constraints. 
# # e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0 # OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1 # NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7 # = 0 5 10 1 6 11 13 2 7 12 3 8 # # Sorting by this then gives the following banks: # # 3 2 1 0 3 2 1 0 3 2 1 2 # # We'll take the top {beam_size} of these. stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)] stripes = torch.zeros_like(banks) cur_bank_count = -1 cur_bank = banks[0] for i, bank in enumerate(banks): if bank != cur_bank: cur_bank_count = 0 cur_bank = bank else: cur_bank_count += 1 stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count] # STEP 7: Sort by the stripes values sort_values, sort_indices = stripes.sort(dim=0) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] constraint_states = [constraint_states[i] for i in sort_indices] # STEP 8: Truncate to the candidates size! scores_buf = scores_buf[: self.num_cands] indices_buf = indices_buf[: self.num_cands] beams_buf = beams_buf[: self.num_cands] return scores_buf, indices_buf, beams_buf, constraint_states class LengthConstrainedBeamSearch(Search): def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b): super().__init__(tgt_dict) self.min_len_a = min_len_a self.min_len_b = min_len_b self.max_len_a = max_len_a self.max_len_b = max_len_b self.beam = BeamSearch(tgt_dict) self.needs_src_lengths = True def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): min_lens = self.min_len_a * self.src_lengths + self.min_len_b max_lens = self.max_len_a * self.src_lengths + self.max_len_b lprobs[step < min_lens, :, self.eos] = -math.inf lprobs[step >= max_lens, :, self.eos] = 0 return self.beam.step(step, lprobs, scores) class DiverseBeamSearch(Search): """Diverse Beam Search. See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models" for details. We implement cumulative diversity penalty here as default, optionally provide Hamming diversity described in the original paper, and a way to interpolate between the two through diversity_discount. Take the example below for illustration of cumulative diversity implemented. A) I like dogs. B) I like ____. C) There are ___. And we are at step=2, trying to fill in the blank: Hamming diversity: Penalty for B from A is 1 for "dogs" and 0 for any other words like "cats". Penalty for C from A is 1 for "dogs" and 0 for any other words like "cats". Cumulative diversity (default): Penalty for B from A is 3 for "dogs" and 0 for any other words like "cats". Penalty for C from A is 1 for "dogs" and 0 for any other words like "cats". B and C differ because B matches with A for "I" and "like" at respective steps incurring 2 cumulative penalty. Using divesrity_discount to interpolate between the two: if diverstiy_discount = 0.5, then Penalty for B from A is 1.75 (1 + 0.5 + 0.25) for "dogs" and 0 for any other words like "cats". Penalty for C from A is 1 for "dogs" and 0 for any other words like "cats". "I" and "like" matched for B and A at step 0 and 1 respectively. Since "I" is two steps away and "like" is one step away, they are discounted by (0.5)^2 and 0.5 respectively. When diversity_discount = 0, we recover Hammning diversity and when diversity_discount = 1, we recover cumulative diversity. NB: During beam search for each diversity group, `candidate_mutiple` is set to 1 rather than BeamSearch default(2). 
This is to ensure we have final `beam_size` candidates so that no diversity groups would be dropped during final token selection in sequence generation. For full backwards compatibility, use diversity_discount=0 and candidate_multiple=2. """ def __init__( self, tgt_dict, num_groups, diversity_strength, diversity_discount=1.0, candidate_multiple=1, ): super().__init__(tgt_dict) self.num_groups = num_groups self.diversity_strength = -diversity_strength self.beam = BeamSearch(tgt_dict) self.diversity_discount = diversity_discount self.candidate_multiple = candidate_multiple # Float tensor to keep track of overlap between groups. # Each token shared at the same step between two groups is counted as one. # Then token counts are discounted by `diversity_discount` for every next timestep. # Once initialized, dimension is batch_size * num_groups * num_groups. self.group_overlap = torch.empty(0) @torch.jit.export def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if beam_size % self.num_groups != 0: raise ValueError( "DiverseBeamSearch requires --beam to be divisible by the number of groups" ) # initialize diversity penalty diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs) scores_G, beams_G = [], [] # pre-allocating tensor for indices for all groups indices_G_stacked = torch.empty( bsz, int(beam_size / self.num_groups) * self.candidate_multiple, self.num_groups, dtype=torch.long, device=lprobs.device, ) for g in range(self.num_groups): lprobs_g = lprobs[:, g :: self.num_groups, :] scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None diversity_buf.zero_() # apply diversity penalty if g > 0: indices_ = indices_G_stacked[:, :, :g] if step > 0: penalty_val = 1 + self.group_overlap[original_batch_idxs, g, :g] penalty_val = penalty_val.unsqueeze(1) else: penalty_val = torch.ones(bsz, 1, 1) diversity_buf.scatter_add_( 1, indices_.reshape(bsz, -1), penalty_val.expand(indices_.size()) .reshape(bsz, -1) .to(diversity_buf), ) lprobs_g = torch.add( lprobs_g, other=diversity_buf.unsqueeze(1), alpha=self.diversity_strength, ) else: lprobs_g = lprobs_g.contiguous() scores_buf, indices_buf, beams_buf = self.beam.step( step, lprobs_g, scores_g, candidate_multiple=self.candidate_multiple ) beams_buf.mul_(self.num_groups).add_(g) scores_G.append(scores_buf.clone()) beams_G.append(beams_buf.clone()) indices_G_stacked[:, :, g] = indices_buf # interleave results from different groups scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1) indices_buf = indices_G_stacked.view(bsz, -1) beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1) # find num of overlapped tokens for each group pair # then discount it for next timestamp overlap = self.diversity_discount * torch.sum( indices_G_stacked.unsqueeze(2).eq(indices_G_stacked.unsqueeze(3)), dim=1 ) if step == 0: self.group_overlap = overlap else: self.group_overlap[original_batch_idxs] = ( self.group_overlap[original_batch_idxs] * self.diversity_discount + overlap ) return scores_buf, indices_buf, beams_buf class Sampling(Search): sampling_topk: int sampling_topp: float def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0): super().__init__(tgt_dict) self.sampling_topk = sampling_topk self.sampling_topp = sampling_topp def _sample_topp(self, lprobs): """Sample among the smallest set of elements whose cumulative probability mass exceeds p. 
See `"The Curious Case of Neural Text Degeneration" (Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_. Args: lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step Return: A tuple of (trimed_probs, truncated_indices) where: trimed_probs: (bsz x input_beam_size x ?) the model's probabilities over the elements selected to sample from. The width of the third dimension is determined by top-P. truncated_indices: (bsz x input_beam_size x ?) the indices of the chosen elements. """ probs = lprobs.exp_() # sort the last dimension (vocab dimension) in descending order sorted_probs, sorted_indices = probs.sort(descending=True) # compute a mask to indicate the words to be included in the top-P set. cumsum_probs = sorted_probs.cumsum(dim=2) mask = cumsum_probs.lt(self.sampling_topp) # note that mask was computed by 'lt'. One more word needs to be included # so that the cumulative probability mass can exceed p. cumsum_mask = mask.cumsum(dim=2) last_included = cumsum_mask[:, :, -1:] last_included.clamp_(0, mask.size()[2] - 1) mask = mask.scatter_(2, last_included, 1) # truncate unnecessary dims. max_dim = last_included.max() truncated_mask = mask[:, :, : max_dim + 1] truncated_probs = sorted_probs[:, :, : max_dim + 1] truncated_indices = sorted_indices[:, :, : max_dim + 1] # trim the words that are not in top-P by setting their probabilities # to 0, so that they would not be sampled later. trim_mask = ~truncated_mask trimed_probs = truncated_probs.masked_fill_(trim_mask, 0) return trimed_probs, truncated_indices @torch.jit.export def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() if self.sampling_topp > 0: # only sample from the smallest set of words whose cumulative probability mass exceeds p probs, top_indices = self._sample_topp(lprobs) elif self.sampling_topk > 0: # only sample from top-k candidates lprobs, top_indices = lprobs.topk(self.sampling_topk) probs = lprobs.exp_() else: probs = lprobs.exp_() # dummy data to be consistent with true branch for type check top_indices = torch.empty(0).to(probs) # sample if step == 0: indices_buf = torch.multinomial( probs.view(bsz, -1), beam_size, replacement=True, ).view(bsz, beam_size) else: indices_buf = torch.multinomial( probs.view(bsz * beam_size, -1), 1, replacement=True, ).view(bsz, beam_size) if step == 0: # expand to beam size probs = probs.expand(bsz, beam_size, -1) # gather scores scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1)) scores_buf = scores_buf.log_().view(bsz, -1) # remap indices if using top-k or top-P sampling if self.sampling_topk > 0 or self.sampling_topp > 0: indices_buf = torch.gather( top_indices.expand(bsz, beam_size, -1), dim=2, index=indices_buf.unsqueeze(-1), ).squeeze(2) if step == 0: beams_buf = indices_buf.new_zeros(bsz, beam_size) else: beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1) # make scores cumulative scores_buf.add_( torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf) ) return scores_buf, indices_buf, beams_buf class DiverseSiblingsSearch(Search): """ Beam search with diverse siblings. See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details. 
https://arxiv.org/abs/1611.08562 1/ Calculate hypotheses for each beam 2/ Intra-sibling ordering 3/ Rewrite scores 4/ Choose top K hypotheses if diversity_rate == 0 is equivalent to BeamSearch """ def __init__(self, tgt_dict, diversity_rate): super().__init__(tgt_dict) self.diversity_rate = diversity_rate self.beam = BeamSearch(tgt_dict) def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() k = min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ) s_list: List[Tensor] i_list: List[Tensor] s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)] i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)] sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate if step == 0: return self.beam.step(step, lprobs, scores) lprobs.add_(scores[:, :, step - 1].unsqueeze(-1)) # 1/ Calculate hypotheses for each beam for i in range(beam_size): torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i])) i_list[i].fmod_(vocab_size) # 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores s_list[i].sub_(sibling_score) # 4/ Choose top K hypotheses indices = torch.stack(i_list, dim=1).view(bsz, -1) final_scores = torch.empty(0).to(lprobs) final_indices = torch.LongTensor().to(device=lprobs.device) final_beams = torch.LongTensor().to(device=lprobs.device) (final_scores, final_indices) = torch.topk( torch.stack(s_list, dim=1).view(bsz, -1), k, ) final_beams = final_indices // k for i in range(bsz): final_indices[i] = indices[i][final_indices[i]] return final_scores, final_indices, final_beams
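# ---------------------------------------------------------------------------
# Illustrative sketch (not part of fairseq): the round-robin "bank striping"
# performed in STEPs 6-7 of the constrained-search step() above. Candidates
# arrive sorted by (bank, score); striping then interleaves the best item of
# every bank, the second-best of every bank, and so on, so hypotheses that
# have already satisfied more constraints are never starved out of the beam.
# The helper below mirrors the sort-key construction on plain Python lists;
# its name and the example values are hypothetical and nothing here is called
# by the library.
def _stripe_order_demo(banks, num_constraint_tokens):
    """Return candidate indices in striped (round-robin across banks) order.

    ``banks`` is assumed to be grouped and ordered as produced by STEP 4.
    """
    stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
    stripes = [0] * len(banks)
    cur_bank_count = -1
    cur_bank = banks[0]
    for i, bank in enumerate(banks):
        if bank != cur_bank:
            cur_bank_count = 0
            cur_bank = bank
        else:
            cur_bank_count += 1
        # smaller key = selected earlier; higher banks lead within each stripe
        stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
    return sorted(range(len(banks)), key=lambda i: stripes[i])


# With banks = [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0] and num_constraint_tokens = 3,
# the returned order visits banks as 3 2 1 0 3 2 1 0 3 2 1 2, matching the
# worked example in the comments above; STEP 8 then keeps the first few entries.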
EXA-1-master
exa/libraries/fairseq/fairseq/search.py
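# ---------------------------------------------------------------------------
# Illustrative sketch (not part of fairseq): the nucleus / top-P truncation
# trick used by Sampling._sample_topp in search.py above, reduced to a single
# 1-D distribution. The function name and the probabilities in the example
# are hypothetical; the real implementation works on
# (bsz x beam_size x vocab_size) tensors and zeroes out the trimmed entries
# instead of dropping them.
import torch


def topp_truncate_demo(probs: torch.Tensor, p: float):
    """Keep the smallest set of tokens whose cumulative probability exceeds p."""
    sorted_probs, sorted_ids = probs.sort(descending=True)
    cumsum = sorted_probs.cumsum(dim=0)
    # strictly-less-than mask, then include one more token so that the kept
    # mass actually exceeds p (the lt + scatter trick above does the same)
    mask = cumsum.lt(p)
    last_included = min(int(mask.sum()), probs.numel() - 1)
    mask[last_included] = True
    return sorted_probs[mask], sorted_ids[mask]


# probs = torch.tensor([0.05, 0.4, 0.1, 0.3, 0.15]); p = 0.6
# topp_truncate_demo(probs, p) keeps token ids [1, 3] with probabilities
# [0.4, 0.3], whose cumulative mass 0.7 is the smallest that exceeds 0.6.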
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import shutil from typing import List, Optional logger = logging.getLogger(__file__) try: from iopath.common.file_io import g_pathmgr as IOPathManager try: # [FB only - for now] AWS PathHandler for PathManager from .fb_pathhandlers import S3PathHandler IOPathManager.register_handler(S3PathHandler()) except KeyError: logging.warning("S3PathHandler already registered.") except ImportError: logging.debug( "S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module." ) except ImportError: IOPathManager = None class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). """ @staticmethod def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) @staticmethod def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) @staticmethod def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path @staticmethod def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) @staticmethod def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) @staticmethod def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) @staticmethod def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) @staticmethod def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) @staticmethod def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) @staticmethod def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) @staticmethod def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) @staticmethod def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False @staticmethod def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) @staticmethod def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ @staticmethod def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return 
file descriptor with asynchronous write operations. """ global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) @staticmethod def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. """ global IOPathManager if IOPathManager: return IOPathManager.async_close() return False
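# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): callers treat
# PathManager like the builtin file API. When iopath is available the calls
# are routed to its registered path handlers; otherwise they fall back to the
# os/shutil code paths above. The temporary file below is a hypothetical demo,
# guarded so nothing runs on import.
if __name__ == "__main__":
    import tempfile

    demo_dir = tempfile.mkdtemp()
    demo_file = os.path.join(demo_dir, "notes.txt")
    with PathManager.open(demo_file, "w") as f:
        f.write("hello\n")
    assert PathManager.exists(demo_file) and PathManager.isfile(demo_file)
    PathManager.rm(demo_file)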
EXA-1-master
exa/libraries/fairseq/fairseq/file_io.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a network across multiple GPUs. """ import contextlib import logging import os import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from omegaconf import OmegaConf from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.models.ema import build_ema from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler from fairseq.utils import safe_hasattr logger = logging.getLogger(__name__) class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): if isinstance(cfg, Namespace): logger.warning( "argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf" ) cfg = convert_namespace_to_omegaconf(cfg) self.cfg = cfg self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = cfg.common.tpu self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu if self.cuda: self.device = torch.device("cuda") elif self.tpu: self.device = utils.get_tpu_device() else: self.device = torch.device("cpu") if self.is_fsdp: import fairscale if self.cfg.common.bf16: raise ValueError( "FullyShardedDataParallel is not compatible with --bf16 or " "--memory-efficient-bf16" ) if self.cfg.distributed_training.zero_sharding != "none": raise ValueError( "FullyShardedDataParallel is not compatible with --zero-sharding " "option (it's already built in)" ) if ( max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0" ): raise RuntimeError( "Please update to fairscale 0.4.0 or newer when combining " "--update-freq with FullyShardedDataParallel" ) else: if ( hasattr(self.cfg.distributed_training, "cpu_offload") and self.cfg.distributed_training.cpu_offload ): raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded") # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if not self.is_fsdp: if cfg.common.fp16: assert not cfg.common.amp, "Cannot use fp16 and AMP together" self._criterion = self._criterion.half() self._model = self._model.half() elif cfg.common.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) elif cfg.common.amp: self._amp_retries = 0 if ( not cfg.distributed_training.pipeline_model_parallel # the DistributedFairseqModel wrapper will handle moving to device, # so only handle cases which don't use the wrapper and not self.use_distributed_wrapper ): self._criterion = self._criterion.to(device=self.device) self._model = self._model.to(device=self.device) self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel self.last_device = None 
if self.cuda and self.pipeline_model_parallel: self.last_device = torch.device( cfg.distributed_training.pipeline_devices[-1] ) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( "detected shared parameter: {} <- {}".format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = None # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None self._ema = None # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = distributed_utils.all_gather_list( self.cuda_env, group=distributed_utils.get_global_group() ) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None @property def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() @property def data_parallel_process_group(self): return distributed_utils.get_data_parallel_group() @property def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() @property def is_data_parallel_master(self): # NOTE: this returns true for all model parallel replicas with data # parallel rank 0 return self.data_parallel_rank == 0 @property def use_distributed_wrapper(self) -> bool: return ( self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf ) or (self.is_fsdp and self.cfg.distributed_training.cpu_offload) @property def should_save_checkpoint_on_current_rank(self) -> bool: """Indicates whether to save checkpoints on the current DDP rank.""" if ( self.is_fsdp and self.cfg.distributed_training.use_sharded_state ) or getattr(self.cfg.model, "base_layers", 0) > 0: return True else: return self.is_data_parallel_master @property def always_call_state_dict_during_save_checkpoint(self) -> bool: if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state: # FSDP calls communication collective when consolidating checkpoints return True else: return False @property def checkpoint_suffix(self) -> str: """Suffix to add to the checkpoint file name.""" if self.is_fsdp and self.cfg.distributed_training.use_sharded_state: return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format( self.data_parallel_rank ) else: return self.cfg.checkpoint.checkpoint_suffix or "" @property def criterion(self): if 
self._wrapped_criterion is None: if utils.has_parameters(self._criterion) and self.use_distributed_wrapper: self._wrapped_criterion = models.DistributedFairseqModel( self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion @property def model(self): if self._wrapped_model is None: if self.use_distributed_wrapper: self._wrapped_model = models.DistributedFairseqModel( self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_model = self._model return self._wrapped_model @property def ema(self): if self._ema is None: self._build_ema() return self._ema def _build_ema(self): if self.cfg.ema.store_ema: self._ema = build_ema(self._model, self.cfg.ema, self.device) logger.info("Exponential Moving Average Shadow Model is initialized.") @property def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer @property def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): if ( self.cfg.optimization.debug_param_names and self.cfg.common.fp16_no_flatten_grads ): params = [] self.param_names = [] for n, p in chain( self.model.named_parameters(), self.criterion.named_parameters() ): if p.requires_grad: params.append(p) self.param_names.append(n) else: params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) if self.is_fsdp and self.cfg.common.fp16: # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper, # mostly for the grad scaling. But if we don't have the # --memory-efficient-fp16 flag set, then we're effectively doing # regular --fp16 and can allow the use of optimizers that would # otherwise be unsupported by MemoryEfficientFP16Optimizer. allow_unsupported = not self.cfg.common.memory_efficient_fp16 self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params, allow_unsupported=allow_unsupported ) elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp: if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16 or --amp, " "please switch to FP32 which is likely to be faster" ) if ( self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16 ): self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params ) elif self.cfg.common.amp: self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: logger.info( "NOTE: your device may support faster training with --fp16 or --amp" ) self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) if self.is_fsdp: assert ( not self.cfg.optimization.use_bmuf ), "--ddp-backend=fully_sharded is not compatible with BMUF" assert self._optimizer.supports_flat_params, ( "--ddp-backend=fully_sharded is only compatible with pointwise " "optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). 
" "However, the sharding will result in slightly different results when " "using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)" ) if self.cfg.optimization.use_bmuf: self._optimizer = optim.FairseqBMUF( self.cfg.bmuf, self._optimizer, ) if self.cfg.distributed_training.zero_sharding == "os": if ( self.cfg.common.fp16 and not self.cfg.common.memory_efficient_fp16 and not self.cfg.common.memory_efficient_bf16 ) and not self.cfg.common.fp16_no_flatten_grads: raise ValueError( "ZeRO is incomptabile with fp16 and flattened grads. " "Please use --fp16-no-flatten-grads" ) else: optim.shard_(self._optimizer, self.data_parallel_process_group) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. self._lr_scheduler = lr_scheduler.build_lr_scheduler( self.cfg.lr_scheduler, self.optimizer, ) self._lr_scheduler.step_update(0) @property def is_fsdp(self): return self.cfg.distributed_training.ddp_backend == "fully_sharded" def consolidate_optimizer(self): """For OSS, we need to consolidate the state dict.""" if self.cfg.checkpoint.no_save_optimizer_state: return self._gathered_optim_state = None if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): self.optimizer.optimizer.consolidate_state_dict() elif self.is_fsdp and not self.model.use_sharded_state: st = self.model.gather_full_optim_state_dict( self.optimizer ) # only returns on rank 0 self._gathered_optim_state = st def state_dict(self): state_dict = { "args": None, # legacy "cfg": ( OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) if OmegaConf.is_config(self.cfg) else self.cfg ), "model": self.model.state_dict(), "criterion": ( self.criterion.state_dict() if utils.has_parameters(self.criterion) else None ), "optimizer_history": (self._optim_history or []) + [ { "criterion_name": self.get_criterion().__class__.__name__, "optimizer_name": self.optimizer.__class__.__name__, "lr_scheduler_state": self.lr_scheduler.state_dict(), "num_updates": self.get_num_updates(), } ], "task_state": self.task.state_dict() if self.task is not None else {}, "extra_state": { "metrics": metrics.state_dict(), "previous_training_time": self.cumulative_training_time(), }, } if self.cfg.ema.store_ema: # Save EMA model state as extra state state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict() if self.cfg.ema.ema_fp32: # Save EMA params in fp32 state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params if not self.cfg.checkpoint.no_save_optimizer_state: if self._gathered_optim_state is not None: state_dict["last_optimizer_state"] = self._gathered_optim_state self._gathered_optim_state = None else: state_dict["last_optimizer_state"] = self.optimizer.state_dict() if self.is_fsdp: # save meta data for recombining checkpoint upon loading state_dict["fsdp_metadata"] = self.model.local_metadata_dict() return state_dict def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" if self.should_save_checkpoint_on_current_rank: logger.info(f"Saving checkpoint to {os.path.abspath(filename)}") # call state_dict on all ranks in case it needs internal communication state_dict = utils.move_to_cpu(self.state_dict()) state_dict["extra_state"].update(extra_state) checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=self.cfg.checkpoint.write_checkpoints_asynchronously, ) logger.info(f"Finished saving checkpoint to {os.path.abspath(filename)}") return os.path.abspath(filename) return None def 
load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False, ): """ Load all training state from a checkpoint file. rank = 0 will load the checkpoint, and then broadcast it to all other ranks. """ extra_state, self._optim_history, last_optim_state = None, [], None logger.info(f"Preparing to load checkpoint {filename}") is_distributed = self.data_parallel_world_size > 1 bexists = PathManager.isfile(filename) if bexists: load_on_all_ranks = ( self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks # TPUs don't support broadcast yet, so load checkpoints # on every worker for now or self.tpu # FSDP requires loading checkpoint shards on all ranks or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or getattr(self.cfg.model, "base_layers", 0) > 0 ) if load_on_all_ranks or self.data_parallel_rank == 0: state = checkpoint_utils.load_checkpoint_to_cpu( filename, load_on_all_ranks=load_on_all_ranks ) last_optim_state = state.get("last_optimizer_state", None) # If doing zero_sharding, do not broadcast global optimizer # state. Later we will broadcast sharded states to each rank # to avoid memory from exploding. if ( not load_on_all_ranks and self.cfg.distributed_training.zero_sharding == "os" and "last_optimizer_state" in state and is_distributed ): state["last_optimizer_state"] = "SHARDED" else: last_optim_state = None state = None if is_distributed and not load_on_all_ranks: state = distributed_utils.broadcast_object( state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device, ) if self.data_parallel_rank > 0: last_optim_state = state.get("last_optimizer_state", None) # load model parameters try: if ( "optimizer_history" in state and len(state["optimizer_history"]) > 0 and "num_updates" in state["optimizer_history"][-1] ): self.model.set_num_updates( state["optimizer_history"][-1]["num_updates"] ) # this is the code related to AdaPrune # In short, it removes redundant heads in multi-head attention module based on heads importance provided # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI # The idea of prune in mha can be summarized as # Fine tune model (e.g. roberta encoder) on a certain datasets with regularization # After the model is trained. User could use get_reserve_head_index and _adaptive_prune_heads functions to get the top X heads with most importance. # Then user uses the rank to prune a new roberta encoder and save the pruned ckpt manually. # User will fine tune the the new roberta encoder via the ckpt saved above # To get rid of registering different pruned version of Roberta, I use the argument --mha-heads-to-keep to prune the Roberta model into a pruned version which matches the pruned ckpt. 
if ( safe_hasattr(self.model, "args") and safe_hasattr(self.model.args, "mha_heads_to_keep") and self.model.args.mha_heads_to_keep != -1 ): logger.info( f"Prune model: keep {self.model.args.mha_heads_to_keep} heads for each multihead attention module" ) for layer in self.model.encoder.sentence_encoder.layers: reserve_head_index = layer.self_attn._get_reserve_head_index( num_heads_to_keep=self.model.args.mha_heads_to_keep ) layer.self_attn._adaptive_prune_heads( reserve_head_index=reserve_head_index ) layer.self_attn._set_skip_embed_dim_check() logger.info(self.model) # this is the code related to AdaPrune # In short, it removes redundant units in feedforward layer in each transformer layer based on importance # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI # The idea of prune in ffn can be summarized as # Fine tune model (e.g. roberta encoder) on a certain datasets with regularization # After the model is trained. User could use _get_fc_rank and _prune_fc_layer functions to get the top X units with most importance. # Then user uses the rank to prune a new roberta encoder and save the pruned ckpt manually. # User will fine tune the the new roberta encoder via the ckpt saved above # To get rid of registering different pruned version of Roberta, I use the argument --ffn-blocks-to-remove to prune the Roberta model into a pruned version which matches the pruned ckpt. if ( safe_hasattr(self.model, "args") and safe_hasattr(self.model.args, "ffn_blocks_to_remove") and self.model.args.ffn_blocks_to_remove != -1 ): logger.info( f"Prune model: remove {self.model.args.ffn_blocks_to_remove} ffn blocks for each transformer layer" ) for layer in self.model.encoder.sentence_encoder.layers: remove_index = layer._get_fc_rank( remove_num=self.model.args.ffn_blocks_to_remove ) layer._prune_fc_layer(remove_index=remove_index) logger.info(self.model) self.model.load_state_dict( state["model"], strict=True, model_cfg=self.cfg.model ) # save memory for later steps del state["model"] if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) del state["criterion"] except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}" assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). 
{last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}" if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) if self.is_fsdp and not self.model.use_sharded_state: # if use_sharded_state, the last_optim_state is already sharded, skip this last_optim_state = self.model.get_shard_from_optim_state_dict( last_optim_state ) elif not load_on_all_ranks and is_distributed: last_optim_state = self.optimizer.broadcast_global_state_dict( last_optim_state ) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: itr_state = extra_state["train_iterator"] epoch = itr_state["epoch"] if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if ( itr_state.get("version", 1) >= 2 and itr_state["iterations_in_epoch"] == 0 ): # reset meters at start of epoch reset_meters = True if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() if self.cfg.ema.store_ema: if "ema" not in extra_state: logger.warn( "EMA not found in checkpoint. But store_ema is True. " "EMA is re-initialized from checkpoint." ) self.ema.restore( state["model"], build_fp32_params=self.cfg.ema.ema_fp32 ) else: logger.info("Loading EMA from checkpoint") self.ema.restore(extra_state["ema"], build_fp32_params=False) if self.cfg.ema.ema_fp32: if "ema_fp32_params" in extra_state: logger.info("Loading EMA fp32 params from checkpoint") self.ema.build_fp32_params(extra_state["ema_fp32_params"]) else: logger.info( "Building EMA fp32 params from EMA model in checkpoint" ) self.ema.build_fp32_params() logger.info( "Loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) else: logger.info("No existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False, ): """Return an EpochBatchIterator over the training set for a given epoch.""" if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, tpu=self.tpu, ) batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=(self.cfg.common.seed + epoch) if self.cfg.dataset.update_ordered_indices_seed else self.cfg.common.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, skip_remainder_batch=self.cfg.optimization.skip_remainder_batch, grouped_shuffling=self.cfg.dataset.grouped_shuffling, update_epoch_batch_itr=self.cfg.dataset.update_epoch_batch_itr, ) 
self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def get_valid_iterator( self, subset, disable_iterator_cache=False, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, # always pass a fixed "epoch" to keep validation data consistent # across training epochs epoch=1, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, skip_remainder_batch=False, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) self.lr_step_begin_epoch(epoch) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("begin_epoch") # wait for all workers xm.mark_step() def begin_valid_epoch(self, epoch): """Called at the beginning of each validation epoch.""" # task specific setup per validation epoch self.task.begin_valid_epoch(epoch, self.get_model()) def reset_dummy_batch(self, batch): self._dummy_batch = batch @metrics.aggregate("train") def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() has_oom = False # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 for i, sample in enumerate(samples): # delayed update loop sample, is_dummy_batch = self._prepare_sample(sample) def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 # The no_sync context manager results in increased memory # usage with FSDP, since full-size gradients will be # accumulated on each GPU. It's typically a better tradeoff # to do the extra communication with FSDP. 
and not self.is_fsdp ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, **extra_kwargs, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) has_oom = True if raise_oom: raise e else: raise e except Exception: self.consolidate_optimizer() self.save_checkpoint( os.path.join(self.cfg.checkpoint.save_dir, "crash.pt"), {} ) raise if has_oom: logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.cfg.distributed_training.distributed_world_size == 1: return None if self.tpu and i < len(samples) - 1: # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. # To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass self._xla_markstep_and_send_to_cpu() if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas if self._sync_stats(): train_time = self._local_cumulative_training_time() ( logging_outputs, ( sample_size, ooms, total_train_time, ), ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch ) self._cumulative_training_time = ( total_train_time / self.data_parallel_world_size ) overflow = False try: with torch.autograd.profiler.record_function("reduce-grads"): # reduce gradients across workers self.optimizer.all_reduce_grads(self.model) if utils.has_parameters(self.criterion): self.optimizer.all_reduce_grads(self.criterion) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (data_parallel_size / sample_size) since # DDP normalizes by the number of data parallel workers for # improved fp16 precision. # Thus we get (sum_of_gradients / sample_size) at the end. # In case of fp16, this step also undoes loss scaling. # (Debugging note: Some optimizers perform this scaling on the # fly, so inspecting model.parameters() or optimizer.params may # still show the original, unscaled gradients.) numer = ( self.data_parallel_world_size if not self.cfg.optimization.use_bmuf or self._sync_stats() else 1 ) self.optimizer.multiply_grads(numer / (sample_size or 1.0)) # Note: (sample_size or 1.0) handles the case of a zero gradient, in a # way that avoids CPU/device transfers in case sample_size is a GPU or # TPU object. The assumption is that the gradient itself is also 0. 
with torch.autograd.profiler.record_function("clip-grads"): # clip grads grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) # check that grad norms are consistent across workers # on tpu check tensor is slow if not self.tpu: if ( not self.cfg.optimization.use_bmuf and self.cfg.distributed_training.ddp_backend != "slowmo" ): self._check_grad_norms(grad_norm) if not torch.isfinite(grad_norm).all(): # in case of AMP, if gradients are Nan/Inf then # optimizer step is still required if self.cfg.common.amp: overflow = True else: # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.task.optimizer_step( self.optimizer, model=self.model, update_num=self.get_num_updates() ) if self.cfg.common.amp and overflow: if self._amp_retries == self.cfg.common.amp_batch_retries: logger.info("AMP: skipping this batch.") self._amp_retries = 0 else: self._amp_retries += 1 return self.train_step( samples, raise_oom ) # recursion to feed in same batch except FloatingPointError: self.consolidate_optimizer() self.save_checkpoint( os.path.join(self.cfg.checkpoint.save_dir, "crash.pt"), {} ) # re-run the forward and backward pass with hooks attached to print # out where it fails self.zero_grad() with NanDetector(self.get_model()): for _, sample in enumerate(samples): sample, _ = self._prepare_sample(sample) self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, **extra_kwargs, ) raise except OverflowError as e: overflow = True logger.info( f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}" ) if hasattr(self, "param_names") and hasattr( self.optimizer, "fp32_optimizer" ): for p, n in zip(self.optimizer.fp32_optimizer.params, self.param_names): if torch.isinf(p.grad).any() or torch.isnan(p.grad).any(): logger.info(f"overflow in param {n}") grad_norm = torch.tensor(0.0).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer # after the step if hasattr(self.model, "perform_slowmo"): self.model.perform_slowmo( self.optimizer.optimizer, getattr(self.optimizer, "fp32_params", None) ) logging_output = None if not overflow or self.cfg.distributed_training.ddp_backend == "slowmo": self.set_num_updates(self.get_num_updates() + 1) if self.cfg.ema.store_ema: # Step EMA forward with new model. 
self.ema.step( self.get_model(), self.get_num_updates(), ) metrics.log_scalar( "ema_decay", self.ema.get_decay(), priority=10000, round=5, weight=0, ) if self.tpu: import torch_xla.core.xla_model as xm # mark step on TPUs self._xla_markstep_and_send_to_cpu() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.cfg.common.log_interval == 0: # log memory usage mem_info = xm.get_memory_info(self.device) gb_free = mem_info["kb_free"] / 1024 / 1024 gb_total = mem_info["kb_total"] / 1024 / 1024 metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) metrics.log_scalar( "gb_total", gb_total, priority=1600, round=1, weight=0 ) logging_outputs = self._xla_markstep_and_send_to_cpu( logging_outputs ) logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: if self.cuda and self.cuda_env is not None: # log minimum free memory over the iteration gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024 torch.cuda.reset_peak_memory_stats() gb_free = self.cuda_env.total_memory_in_GB - gb_used metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and self.cfg.common.empty_cache_freq > 0 and ( (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) % self.cfg.common.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.cfg.common.fp16 or self.cfg.common.amp: metrics.log_scalar( "loss_scale", ( self.optimizer.scaler.loss_scale if self.cfg.common.fp16 else self.optimizer.scaler.get_scale() ), priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output @metrics.aggregate("valid") def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("valid_step") # wait for all workers # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. 
extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() with torch.no_grad(): self.model.eval() self.criterion.eval() sample, is_dummy_batch = self._prepare_sample(sample) try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion, **extra_kwargs ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( "ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size,) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats if self.tpu: logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs) logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_output def zero_grad(self): self.optimizer.zero_grad() def lr_step_begin_epoch(self, epoch): """Adjust the learning rate at the beginning of the epoch.""" self.lr_scheduler.step_begin_epoch(epoch) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) if isinstance(new_lr, dict): for k, v in new_lr.items(): metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) new_lr = new_lr.get("default", next(iter(new_lr.values()))) else: metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if "get_meter" not in self._warn_once: self._warn_once.add("get_meter") utils.deprecation_warning( "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_") :] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): def agg_norm_fn(total_norm): total_norm = total_norm.cuda().float() ** 2 total_norm = distributed_utils.all_reduce( total_norm, group=self.data_parallel_process_group ) return total_norm**0.5 should_agg_norm = self.is_fsdp and ( self.data_parallel_process_group is not None or torch.distributed.is_initialized() ) return self.optimizer.clip_grad_norm( clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None ) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _fp_convert_sample(self, sample): def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.cfg.common.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.cfg.common.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) return sample def _prepare_sample(self, sample, is_dummy=False): if sample == "DUMMY": raise Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: assert ( self._dummy_batch is not None and len(self._dummy_batch) > 0 ), "Invalid dummy batch: {}".format(self._dummy_batch) sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) return sample, True # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth # it makes sense to do the format conversion on the CPU and then transfer # a smaller buffer to the device. This also saves GPU memory capacity. 
if self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self.cuda: if self.pipeline_model_parallel: if "target" in sample: sample["target"] = utils.move_to_cuda( sample["target"], device=self.last_device ) else: sample = utils.move_to_cuda(sample) elif self.tpu and is_dummy: # the dummy batch may not be on the appropriate device sample = utils.move_to_cuda(sample, device=self.device) if not self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self._dummy_batch == "DUMMY": self._dummy_batch = sample return sample, False def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.cfg.common.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. if self.data_parallel_world_size == 1: return False elif self.cfg.optimization.use_bmuf: return ( self.get_num_updates() + 1 ) % self.cfg.bmuf.global_sync_iter == 0 and ( self.get_num_updates() + 1 ) > self.cfg.bmuf.warmup_iterations else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list( zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), group=self.data_parallel_process_group, ) ) ) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data["extra_stats_" + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data["logging_outputs_" + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( ( torch.isfinite(tensor).all() and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all() ) or (self.cfg.common.amp and not torch.isfinite(tensor).all()) # in case of amp non-finite grads are fine ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format( pretty_detail ) # use FloatingPointError to trigger NanDetector raise FloatingPointError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=legacy_ddp. " "Or are you mixing up different generation of GPUs in training?" 
+ "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None and ( not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) ): metrics.log_speed("ups", 1.0, priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.cfg.optimization.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.cfg.optimization.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: logger.warning( "XLA compilation detected on device #{}; too many of these can lead " "to slow training, but we expect a few in the beginning".format( self.cfg.distributed_training.distributed_rank ) ) self._num_xla_compiles = num_xla_compiles def _xla_markstep_and_send_to_cpu(self, data=None): import torch_xla.core.xla_model as xm xm.mark_step() if data is not None: from fairseq.utils import xla_device_to_cpu return xla_device_to_cpu(data) def _catalog_shared_params(module, memo=None, prefix=""): if memo is None: first_call = True memo = {} else: first_call = False for name, param in module._parameters.items(): param_prefix = prefix + ("." if prefix else "") + name if param not in memo: memo[param] = [] memo[param].append(param_prefix) for name, m in module._modules.items(): if m is None: continue submodule_prefix = prefix + ("." if prefix else "") + name _catalog_shared_params(m, memo, submodule_prefix) if first_call: return [x for x in memo.values() if len(x) > 1] def _get_module_by_path(module, path): path = path.split(".") for name in path: module = getattr(module, name) return module def _set_module_by_path(module, path, value): path = path.split(".") for name in path[:-1]: module = getattr(module, name) setattr(module, path[-1], value)
EXA-1-master
exa/libraries/fairseq/fairseq/trainer.py
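A note on the helpers at the bottom of trainer.py above: _catalog_shared_params walks a module tree and groups every attribute path that points at the same Parameter object, which is how fairseq can re-apply weight tying after rebuilding a model from a checkpoint. Below is a minimal standalone sketch of that idea using only torch; catalog_shared_params and TiedLM are illustrative names for this sketch, not fairseq APIs.

import torch.nn as nn


def catalog_shared_params(module, memo=None, prefix=""):
    # Record every dotted path that refers to the same Parameter object.
    first_call = memo is None
    if first_call:
        memo = {}
    for name, param in module._parameters.items():
        path = prefix + ("." if prefix else "") + name
        if param not in memo:
            memo[param] = []
        memo[param].append(path)
    for name, child in module._modules.items():
        if child is not None:
            catalog_shared_params(child, memo, prefix + ("." if prefix else "") + name)
    if first_call:
        # Only parameters reachable through more than one path are "shared".
        return [paths for paths in memo.values() if len(paths) > 1]


class TiedLM(nn.Module):
    def __init__(self, vocab=10, dim=4):
        super().__init__()
        self.embed = nn.Embedding(vocab, dim)
        self.proj = nn.Linear(dim, vocab, bias=False)
        self.proj.weight = self.embed.weight  # weight tying


print(catalog_shared_params(TiedLM()))  # [['embed.weight', 'proj.weight']]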
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import typing as tp from abc import ABC, abstractmethod from collections import Counter from dataclasses import dataclass from multiprocessing import Pool import torch from fairseq.data import Dictionary, indexed_dataset from fairseq.file_chunker_utils import Chunker, find_offsets from fairseq.file_io import PathManager from fairseq.tokenizer import tokenize_line logger = logging.getLogger("binarizer") @dataclass class BinarizeSummary: """ Keep track of what's going on in the binarizer """ num_seq: int = 0 replaced: tp.Optional[Counter] = None num_tok: int = 0 @property def num_replaced(self) -> int: if self.replaced is None: return 0 return sum(self.replaced.values()) @property def replaced_percent(self) -> float: return 100 * self.num_replaced / self.num_tok def __str__(self) -> str: base = f"{self.num_seq} sents, {self.num_tok} tokens" if self.replaced is None: return base return f"{base}, {self.replaced_percent:.3}% replaced" def merge(self, other: "BinarizeSummary"): replaced = None if self.replaced is not None: replaced = self.replaced if other.replaced is not None: if replaced is None: replaced = other.replaced else: replaced += other.replaced self.replaced = replaced self.num_seq += other.num_seq self.num_tok += other.num_tok class Binarizer(ABC): """ a binarizer describes how to take a string and build a tensor out of it """ @abstractmethod def binarize_line( self, line: str, summary: BinarizeSummary, ) -> torch.IntTensor: ... def _worker_prefix(output_prefix: str, worker_id: int): return f"{output_prefix}.pt{worker_id}" class FileBinarizer: """ An file binarizer can take a file, tokenize it, and binarize each line to a tensor """ @classmethod def multiprocess_dataset( cls, input_file: str, dataset_impl: str, binarizer: Binarizer, output_prefix: str, vocab_size=None, num_workers=1, ) -> BinarizeSummary: final_summary = BinarizeSummary() offsets = find_offsets(input_file, num_workers) # find_offsets returns a list of position [pos1, pos2, pos3, pos4] but we would want pairs: # [(pos1, pos2), (pos2, pos3), (pos3, pos4)] to process the chunks with start/end info # we zip the list with itself shifted by one to get all the pairs. 
(first_chunk, *more_chunks) = zip(offsets, offsets[1:]) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) worker_results = [ pool.apply_async( cls._binarize_chunk_and_finalize, args=( binarizer, input_file, start_offset, end_offset, _worker_prefix( output_prefix, worker_id, ), dataset_impl, ), kwds={ "vocab_size": vocab_size, } if vocab_size is not None else {}, ) for worker_id, (start_offset, end_offset) in enumerate( more_chunks, start=1 ) ] pool.close() pool.join() for r in worker_results: summ = r.get() final_summary.merge(summ) # do not close the bin file as we need to merge the worker results in final_ds, summ = cls._binarize_file_chunk( binarizer, input_file, offset_start=first_chunk[0], offset_end=first_chunk[1], output_prefix=output_prefix, dataset_impl=dataset_impl, vocab_size=vocab_size if vocab_size is not None else None, ) final_summary.merge(summ) if num_workers > 1: for worker_id in range(1, num_workers): # merge the worker outputs worker_output_prefix = _worker_prefix( output_prefix, worker_id, ) final_ds.merge_file_(worker_output_prefix) try: os.remove(indexed_dataset.data_file_path(worker_output_prefix)) os.remove(indexed_dataset.index_file_path(worker_output_prefix)) except Exception as e: logger.error( f"couldn't remove {worker_output_prefix}.*", exc_info=e ) # now we can close the file idx_file = indexed_dataset.index_file_path(output_prefix) final_ds.finalize(idx_file) return final_summary @staticmethod def _binarize_file_chunk( binarizer: Binarizer, filename: str, offset_start: int, offset_end: int, output_prefix: str, dataset_impl: str, vocab_size=None, ) -> tp.Tuple[tp.Any, BinarizeSummary]: # (dataset builder, BinarizeSummary) """ creates a dataset builder and append binarized items to it. This function does not finalize the builder, this is useful if you want to do other things with your bin file like appending/merging other files """ bin_file = indexed_dataset.data_file_path(output_prefix) ds = indexed_dataset.make_builder( bin_file, impl=dataset_impl, vocab_size=vocab_size, ) summary = BinarizeSummary() with Chunker( PathManager.get_local_path(filename), offset_start, offset_end ) as line_iterator: for line in line_iterator: ds.add_item(binarizer.binarize_line(line, summary)) return ds, summary @classmethod def _binarize_chunk_and_finalize( cls, binarizer: Binarizer, filename: str, offset_start: int, offset_end: int, output_prefix: str, dataset_impl: str, vocab_size=None, ): """ same as above, but also finalizes the builder """ ds, summ = cls._binarize_file_chunk( binarizer, filename, offset_start, offset_end, output_prefix, dataset_impl, vocab_size=vocab_size, ) idx_file = indexed_dataset.index_file_path(output_prefix) ds.finalize(idx_file) return summ class VocabularyDatasetBinarizer(Binarizer): """ Takes a Dictionary/Vocabulary, assign ids to each token using the dictionary encode_line function. 
""" def __init__( self, dict: Dictionary, tokenize: tp.Callable[[str], tp.List[str]] = tokenize_line, append_eos: bool = True, reverse_order: bool = False, already_numberized: bool = False, ) -> None: self.dict = dict self.tokenize = tokenize self.append_eos = append_eos self.reverse_order = reverse_order self.already_numberized = already_numberized super().__init__() def binarize_line( self, line: str, summary: BinarizeSummary, ): if summary.replaced is None: summary.replaced = Counter() def replaced_consumer(word, idx): if idx == self.dict.unk_index and word != self.dict.unk_word: summary.replaced.update([word]) if self.already_numberized: id_strings = line.strip().split() id_list = [int(id_string) for id_string in id_strings] if self.reverse_order: id_list.reverse() if self.append_eos: id_list.append(self.dict.eos()) ids = torch.IntTensor(id_list) else: ids = self.dict.encode_line( line=line, line_tokenizer=self.tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=self.append_eos, reverse_order=self.reverse_order, ) summary.num_seq += 1 summary.num_tok += len(ids) return ids class AlignmentDatasetBinarizer(Binarizer): """ binarize by parsing a set of alignments and packing them in a tensor (see utils.parse_alignment) """ def __init__( self, alignment_parser: tp.Callable[[str], torch.IntTensor], ) -> None: super().__init__() self.alignment_parser = alignment_parser def binarize_line( self, line: str, summary: BinarizeSummary, ): ids = self.alignment_parser(line) summary.num_seq += 1 summary.num_tok += len(ids) return ids class LegacyBinarizer: @classmethod def binarize( cls, filename: str, dico: Dictionary, consumer: tp.Callable[[torch.IntTensor], None], tokenize: tp.Callable[[str], tp.List[str]] = tokenize_line, append_eos: bool = True, reverse_order: bool = False, offset: int = 0, end: int = -1, already_numberized: bool = False, ) -> tp.Dict[str, int]: binarizer = VocabularyDatasetBinarizer( dict=dico, tokenize=tokenize, append_eos=append_eos, reverse_order=reverse_order, already_numberized=already_numberized, ) return cls._consume_file( filename, binarizer, consumer, offset_start=offset, offset_end=end, ) @classmethod def binarize_alignments( cls, filename: str, alignment_parser: tp.Callable[[str], torch.IntTensor], consumer: tp.Callable[[torch.IntTensor], None], offset: int = 0, end: int = -1, ) -> tp.Dict[str, int]: binarizer = AlignmentDatasetBinarizer(alignment_parser) return cls._consume_file( filename, binarizer, consumer, offset_start=offset, offset_end=end, ) @staticmethod def _consume_file( filename: str, binarizer: Binarizer, consumer: tp.Callable[[torch.IntTensor], None], offset_start: int, offset_end: int, ) -> tp.Dict[str, int]: summary = BinarizeSummary() with Chunker( PathManager.get_local_path(filename), offset_start, offset_end ) as line_iterator: for line in line_iterator: consumer(binarizer.binarize_line(line, summary)) return { "nseq": summary.num_seq, "nunk": summary.num_replaced, "ntok": summary.num_tok, "replaced": summary.replaced, }
EXA-1-master
exa/libraries/fairseq/fairseq/binarizer.py
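VocabularyDatasetBinarizer.binarize_line is the single-line entry point that FileBinarizer fans out across worker processes. A small usage sketch follows, assuming fairseq is installed so that these classes are importable from fairseq.binarizer as defined above; the printed values are illustrative.

from fairseq.binarizer import BinarizeSummary, VocabularyDatasetBinarizer
from fairseq.data import Dictionary

d = Dictionary()
for word in "the cat sat".split():
    d.add_symbol(word)

binarizer = VocabularyDatasetBinarizer(d, append_eos=True)
summary = BinarizeSummary()
ids = binarizer.binarize_line("the cat sat on the mat", summary)

print(ids)      # IntTensor of token ids; "on" and "mat" fall back to <unk>
print(summary)  # e.g. "1 sents, 7 tokens, 28.6% replaced"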
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Implements tracking of constraints for a beam item. A list of constraints is given as a list of one or more token sequences, each of length at least one token. For example, for an input sentence > Die maschinelle Übersetzung ist schwer zu kontrollieren. We could have the constraints: * to influence * hard There are two implementations: * OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints. * UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints. The difference is that in the first, the constraints are assumed to be in order; the algorithm will permit zero or more tokens between them. In the second, the constraints are not ordered, so many orderings will be explored. The same sequence can be present any number of times, and will appear that many times in the output. """ from collections import Counter from typing import List, Optional, Set, Tuple import torch class ConstraintState: def __init__(self): pass def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor: """Takes a list of list of constraints in tensor form (a list of tensor constraints for each sentence) and transforms it into a packed Tensor. For example, here is a batch of size 3 with 3, 0, and 1 constraints: [ [ [3 1 2], [3], [4 5 6 7], ] [], [ [1 8 9 10 1 4 11 12], ] ] Its corresponding packed structure is: [ [ 3 3 1 2 0 3 0 4 5 6 7 0], [ 0 0 0 0 0 0 0 0 0 0 0 0], [ 1 1 8 9 10 1 4 11 12 0 0 0] ] The packed tensor has shape (batch size, maxlen), where maxlen is defined below. Each row contains concatenated constraint tokens for that sentence, with 0 appended after each constraint. The first item in each row is the number of constraints for that sentence. So maxlen is the maximum of (number of constraints) + (sum length of constraints) + 1. across all sentences in the batch. """ # The maximum word length of concatenated constraints for any sentence max_constraints_len = 1 for sentence_constraints in batch_constraints: if len(sentence_constraints): # number of constraints, plus sum of constrain lens, plus a zero after each constraints_len = ( 1 + sum([c.size(0) for c in sentence_constraints]) + len(sentence_constraints) ) max_constraints_len = max(max_constraints_len, constraints_len) batch_size = len(batch_constraints) constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long() for i, sentence_constraints in enumerate(batch_constraints): constraints_tensor[i, 0] = len(sentence_constraints) offset = 1 for j, constraint in enumerate(sentence_constraints): this_len = constraint.size(0) constraints_tensor[i, offset : offset + this_len] = constraint offset += this_len + 1 return constraints_tensor.long() def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]: """ Transforms *one row* of a packed constraint tensor (e.g., for one sentence in the batch) into a list of constraint tensors. """ constraint_list = [] num_constraints = constraint_tensor[0] constraints = constraint_tensor.tolist() offset = 1 for i in range(num_constraints): where = constraints.index(0, offset) constraint_list.append(constraint_tensor[offset:where]) offset = where + 1 return constraint_list class ConstraintNode: """ Represents a node in a trie managing unordered constraints. 
""" def __init__(self, token: int = None, parent=None): # The token associate with this node (None for the root) self.token = int(token) if token is not None else None # The parent (None at the root) self.parent = parent # Whether this node is a completed constraint self.terminal = 0 # List of child nodes self.children = {} # The cumulative number of constraints from this point in the # trie forward self.num_constraints = 0 @property def id(self): return self.token def __str__(self): term = self.terminal != 0 return f"[{self.token}].{term}#{self.num_constraints}" def __getitem__(self, key: int): return self.children.get(key, None) def next_tokens(self) -> Set[int]: """The set of child labels.""" return set(self.children.keys()) @staticmethod def create(constraints: List[List[int]]): root = ConstraintNode() for sequence in constraints: root.add_sequence(sequence) return root @staticmethod def print_graph(node: "ConstraintNode"): if len(node.children) == 0: return str(node) else: s = f"({node}" for child in node.children.values(): s += " " + ConstraintNode.print_graph(child) s += ")" return s def token_counts(self) -> Counter: """Returns a counter of the number of times each token is used in a constraint. """ token_counts = Counter() kids = list(self.children.values()) while len(kids) > 0: kid = kids.pop() token_counts[kid.id] += kid.num_constraints kids += list(kid.children.values()) return token_counts def tokens(self) -> Set[int]: """Returns the set of tokens in constraints.""" return set(self.token_counts().keys()) def add_sequence(self, sequence: List[int]): """Adds a constraint, represented as a list of integers, to the trie.""" assert len(sequence) > 0 token = int(sequence[0]) if token not in self.children: self.children[token] = ConstraintNode(token, parent=self) node = self.children[token] if len(sequence) == 1: node.terminal += 1 node.num_constraints += 1 parent = node.parent while parent is not None: parent.num_constraints += 1 parent = parent.parent else: node.add_sequence(sequence[1:]) class UnorderedConstraintState(ConstraintState): """ Records progress through the set of constraints for each item in the beam using a trie. """ def __init__(self, node: ConstraintNode, copy_from: "ConstraintState" = None): self.node = node if copy_from is None: # The root node self.root = node # The set of states in the graph that have been completed self.completed = Counter() # The... 
self.generated = Counter() # The list of tokens we need to generate self.needed_tokens = self.root.tokens() else: self.completed = Counter(copy_from.completed) self.generated = Counter(copy_from.generated) self.root = copy_from.root # Mark the node as generated if self.node != self.root: self.generated[node] += 1 @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) constraint_trie_root = ConstraintNode.create(constraint_list) return UnorderedConstraintState(constraint_trie_root) def __str__(self): gen_str = ",".join([str(node) for node in self.generated]) return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}" def __copy__(self): copied_state = UnorderedConstraintState(self.node, copy_from=self) return copied_state def copy(self): return self.__copy__() @property def name(self): if self.node.id is None: return "ROOT" else: return str(self.node.id) @property def is_root(self): return self.node == self.root @property def bank(self): return sum(self.generated.values()) @property def num_completed(self): """The number of constraints (not constraint tokens) that are completed. In addition to the already-completed states, we need to account for the current state, which might get marked as completed when another token is generated. """ in_final = self.node.terminal and self.completed[self.node] < self.node.terminal return sum(self.completed.values()) + in_final @property def finished(self): return self.root.num_constraints - self.num_completed == 0 @property def token_counts(self): return self.root.token_counts() @property def tokens(self): return self.root.tokens() @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" if self.node != self.root: return self.root.next_tokens().union(self.node.next_tokens()) else: return self.root.next_tokens() def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. We: - check whether any constraints were met (all prefixes of current state) - if one is found, mark it as completed - adjust visited nodes accordingly """ token = int(token) next_state = None child = self.node[token] if child is not None and self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) def rewind(): """If we're mid-trie and an "illegal" token is chosen next, we need to reset our state to the root state. However, along the way, we need to check whether a prefix of the current trie state represents a state we could mark as completed. 
""" node = self.node while node != self.root: if node.terminal and self.completed[node] < node.terminal: next_state.completed[node] += 1 return next_state.generated[node] -= 1 node = node.parent # Fall off the graph, check the root if next_state is None and token in self.root.next_tokens(): child = self.root[token] # We can only traverse this edge if it's not saturated if self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) else: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() elif next_state is None: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() return next_state class ConstraintSequence: def __init__(self, sequences: List[List[int]]): """Represents a set of possibly multitoken constraints by concatenating them and internally recording the end points. """ self.sequences = [] self.endpoints = [] self.num_tokens = 0 self.tokens = set() for sequence in sequences: for token in sequence: self.tokens.add(token) self.num_tokens += len(sequence) self.endpoints += [False for x in range(len(sequence) - 1)] + [True] self.sequences += sequence def __getitem__(self, key: int): return self.sequences[key] def __len__(self): return len(self.sequences) def __str__(self): return str(self.sequences) class OrderedConstraintState(ConstraintState): """ Records progress through the set of linear nonbranching constraints with gaps. """ def __init__(self, sequence: ConstraintSequence, state: int = -1): self.sequence = sequence self.state = state @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) return OrderedConstraintState(ConstraintSequence(constraint_list), -1) def __str__(self): return f"{self.state}/{self.bank}x{self.num_completed}" def __copy__(self): return OrderedConstraintState(self.sequence, self.state) def copy(self): return self.__copy__() @property def num_completed(self): if self.state == -1: return 0 count = len( list(filter(lambda x: x, self.sequence.endpoints[0 : self.state + 1])) ) return count @property def is_root(self): return self.state == -1 @property def name(self): if self.state == -1: return "ROOT" else: return str(self.sequence[self.state]) @property def bank(self) -> int: return self.state + 1 @property def finished(self): return self.state + 1 == len(self.sequence) @property def token_counts(self): return self.sequence.token_counts() @property def tokens(self): return self.sequence.tokens @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" tokens = set() if self.state > 0: tokens.add(self.sequence[0]) if not self.finished: tokens.add(self.sequence[self.state + 1]) return tokens def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. 
        We:
        - check whether any constraints were met (all prefixes of current state)
        - if one is found, mark it as completed
        - adjust visited nodes accordingly
        """
        token = int(token)
        # print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="")

        if self.finished:
            # Accept anything
            next_state = self.copy()

        elif self.sequence[self.state + 1] == token:
            # Advance to the next token
            next_state = OrderedConstraintState(self.sequence, self.state + 1)

        elif self.sequence.endpoints[self.state]:
            # Accept anything between constraints (*)
            next_state = self.copy()

        elif token == self.sequence[0]:
            # Start over having generated the first token
            next_state = OrderedConstraintState(self.sequence, 0)
        else:
            # Start over from the root
            next_state = OrderedConstraintState(self.sequence, -1)

        return next_state
EXA-1-master
exa/libraries/fairseq/fairseq/token_generation_constraints.py
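The packing format described in the pack_constraints docstring can be checked directly. The round-trip below uses the docstring's batch of 3/0/1 constraints and then walks an OrderedConstraintState through one complete ordering; it is a sketch that assumes the module is importable as fairseq.token_generation_constraints, matching the path above.

import torch

from fairseq.token_generation_constraints import (
    OrderedConstraintState,
    pack_constraints,
    unpack_constraints,
)

batch_constraints = [
    [torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
    [],
    [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
]
packed = pack_constraints(batch_constraints)
print(packed[0].tolist())  # [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0]
print([c.tolist() for c in unpack_constraints(packed[0])])

state = OrderedConstraintState.create(packed[0])
for token in [3, 1, 2, 3, 4, 5, 6, 7]:
    state = state.advance(token)
print(state.finished, state.num_completed)  # True 3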
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass, field from typing import Optional import torch from omegaconf import II from .dummy_dataset import DummyDataset from fairseq.data import Dictionary from fairseq.dataclass import FairseqDataclass from fairseq.tasks import FairseqTask, register_task logger = logging.getLogger(__name__) @dataclass class DummyMaskedLMConfig(FairseqDataclass): dict_size: int = 49996 dataset_size: int = 100000 tokens_per_sample: int = field( default=512, metadata={ "help": "max number of total tokens over all" " segments per sample for BERT dataset" }, ) batch_size: Optional[int] = II("dataset.batch_size") max_tokens: Optional[int] = II("dataset.max_tokens") max_target_positions: int = II("task.tokens_per_sample") @register_task("dummy_masked_lm", dataclass=DummyMaskedLMConfig) class DummyMaskedLMTask(FairseqTask): def __init__(self, cfg: DummyMaskedLMConfig): super().__init__(cfg) self.dictionary = Dictionary() for i in range(cfg.dict_size): self.dictionary.add_symbol("word{}".format(i)) logger.info("dictionary: {} types".format(len(self.dictionary))) # add mask token self.mask_idx = self.dictionary.add_symbol("<mask>") self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8 mask_idx = 0 pad_idx = 1 seq = torch.arange(cfg.tokens_per_sample) + pad_idx + 1 mask = torch.arange(2, cfg.tokens_per_sample, 7) # ~15% src = seq.clone() src[mask] = mask_idx tgt = torch.full_like(seq, pad_idx) tgt[mask] = seq[mask] self.dummy_src = src self.dummy_tgt = tgt def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.cfg.batch_size is not None: bsz = self.cfg.batch_size else: bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.cfg.tokens_per_sample, dtype=torch.long ), }, "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), "nsentences": bsz, "ntokens": bsz * self.cfg.tokens_per_sample, }, num_items=self.cfg.dataset_size, item_size=self.cfg.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
EXA-1-master
exa/libraries/fairseq/fairseq/benchmark/dummy_masked_lm.py
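The interesting part of this task is how the fixed masked-LM example is built in __init__: a stride-7 mask (roughly 15% of positions) is applied to a ramp of fake token ids, and the target keeps the original id only at masked positions. The same construction in isolation, with a small tokens_per_sample so the tensors are easy to print, using plain torch:

import torch

tokens_per_sample, mask_idx, pad_idx = 16, 0, 1

seq = torch.arange(tokens_per_sample) + pad_idx + 1   # fake token ids 2..17
mask = torch.arange(2, tokens_per_sample, 7)          # positions 2 and 9 (stride 7)

src = seq.clone()
src[mask] = mask_idx                                  # masked input
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]                                 # predict only the masked slots

print(src.tolist())  # [2, 3, 0, 5, 6, ...]
print(tgt.tolist())  # [1, 1, 4, 1, 1, ...]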
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from dataclasses import dataclass, field
from typing import Optional

import torch
from .dummy_dataset import DummyDataset
from fairseq.data import Dictionary
from fairseq.dataclass import FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II

logger = logging.getLogger(__name__)


@dataclass
class DummyLMConfig(FairseqDataclass):
    dict_size: int = 49996
    dataset_size: int = 100000
    tokens_per_sample: int = field(
        default=512, metadata={"help": "max sequence length"}
    )
    add_bos_token: bool = False
    batch_size: Optional[int] = II("dataset.batch_size")
    max_tokens: Optional[int] = II("dataset.max_tokens")
    max_target_positions: int = II("task.tokens_per_sample")


@register_task("dummy_lm", dataclass=DummyLMConfig)
class DummyLMTask(FairseqTask):
    def __init__(self, cfg: DummyLMConfig):
        super().__init__(cfg)

        # load dictionary
        self.dictionary = Dictionary()
        for i in range(cfg.dict_size):
            self.dictionary.add_symbol("word{}".format(i))
        self.dictionary.pad_to_multiple_(8)  # often faster if divisible by 8
        logger.info("dictionary: {} types".format(len(self.dictionary)))

        seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1

        self.dummy_src = seq[:-1]
        self.dummy_tgt = seq[1:]

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        if self.cfg.batch_size is not None:
            bsz = self.cfg.batch_size
        else:
            bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample)
        self.datasets[split] = DummyDataset(
            {
                "id": 1,
                "net_input": {
                    "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]),
                    "src_lengths": torch.full(
                        (bsz,), self.cfg.tokens_per_sample, dtype=torch.long
                    ),
                },
                "target": torch.stack([self.dummy_tgt for _ in range(bsz)]),
                "nsentences": bsz,
                "ntokens": bsz * self.cfg.tokens_per_sample,
            },
            num_items=self.cfg.dataset_size,
            item_size=self.cfg.tokens_per_sample,
        )

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary
EXA-1-master
exa/libraries/fairseq/fairseq/benchmark/dummy_lm.py
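Here the dummy language-modelling pair is just a ramp of fake ids shifted by one position, offset past the special symbols. A tiny standalone version, assuming the default fairseq Dictionary layout where pad() is index 1:

import torch

pad_idx = 1                    # Dictionary().pad() in a default fairseq dictionary
tokens_per_sample = 8

seq = torch.arange(tokens_per_sample + 1) + pad_idx + 1
src, tgt = seq[:-1], seq[1:]   # standard next-token shift

print(src.tolist())  # [2, 3, 4, 5, 6, 7, 8, 9]
print(tgt.tolist())  # [3, 4, 5, 6, 7, 8, 9, 10]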
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("dummy_mt") class DummyMTTask(LegacyFairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("--dict-size", default=49996, type=int) parser.add_argument("--dataset-size", default=100000, type=int) parser.add_argument("--src-len", default=30, type=int) parser.add_argument("--tgt-len", default=30, type=int) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed dictionary.pad_to_multiple_(8) # often faster if divisible by 8 self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1 self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1 @classmethod def setup_task(cls, args, **kwargs): """Setup the task.""" dictionary = Dictionary() for i in range(args.dict_size): dictionary.add_symbol("word{}".format(i)) logger.info("dictionary: {} types".format(len(dictionary))) args.max_source_positions = args.src_len + dictionary.pad() + 2 args.max_target_positions = args.tgt_len + dictionary.pad() + 2 return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ item_size = max(self.args.src_len, self.args.tgt_len) if self.args.batch_size is not None: bsz = self.args.batch_size else: bsz = max(1, self.args.max_tokens // item_size) tgt = torch.stack([self.dummy_tgt for _ in range(bsz)]) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.args.src_len, dtype=torch.long ), "prev_output_tokens": tgt.clone(), }, "target": tgt, "nsentences": bsz, "ntokens": bsz * self.args.tgt_len, }, num_items=self.args.dataset_size, item_size=item_size, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
EXA-1-master
exa/libraries/fairseq/fairseq/benchmark/dummy_mt.py
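When --batch-size is unset, load_dataset above derives the batch size from --max-tokens and the longer of the two dummy sequence lengths, then replicates the same source/target rows bsz times. The sizing arithmetic, with illustrative numbers:

max_tokens, src_len, tgt_len = 4096, 30, 30   # illustrative CLI values

item_size = max(src_len, tgt_len)
bsz = max(1, max_tokens // item_size)

print(bsz)            # 136 sentences per fixed dummy batch
print(bsz * tgt_len)  # 4080 target tokens reported as "ntokens"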
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# import models/tasks to register them
from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt  # noqa
EXA-1-master
exa/libraries/fairseq/fairseq/benchmark/__init__.py
import numpy as np

from fairseq.data import FairseqDataset


class DummyDataset(FairseqDataset):
    def __init__(self, batch, num_items, item_size):
        super().__init__()
        self.batch = batch
        self.num_items = num_items
        self.item_size = item_size

    def __getitem__(self, index):
        return index

    def __len__(self):
        return self.num_items

    def collater(self, samples):
        return self.batch

    @property
    def sizes(self):
        return np.array([self.item_size] * self.num_items)

    def num_tokens(self, index):
        return self.item_size

    def size(self, index):
        return self.item_size

    def ordered_indices(self):
        return np.arange(self.num_items)

    @property
    def supports_prefetch(self):
        return False
EXA-1-master
exa/libraries/fairseq/fairseq/benchmark/dummy_dataset.py
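Since collater ignores the sampled indices and always hands back the stored batch, every training step in a benchmark sees an identical input. A quick check with a plain DataLoader, assuming fairseq is importable so this module path resolves:

from torch.utils.data import DataLoader

from fairseq.benchmark.dummy_dataset import DummyDataset

ds = DummyDataset({"dummy": 42}, num_items=10, item_size=5)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collater)

for batch in loader:
    print(batch)  # {'dummy': 42} every time, regardless of the indices drawn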
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import random import torch from torch.utils import benchmark from fairseq.modules.multihead_attention import MultiheadAttention BATCH = [20, 41, 97] SEQ = 64 EMB = 48 HEADS = 4 DROP = 0.1 DEVICE = torch.device("cuda") ATTN_MASK_DTYPE = [torch.uint8, torch.bool, torch.float] KEY_PADDING_MASK_DTYPE = [torch.uint8, torch.bool] def _reset_seeds(): torch.manual_seed(0) random.seed(0) def _get_mask(to_dtype: torch.dtype, dim0: int, dim1: int): if to_dtype == torch.float: mask = torch.randint(0, 2, (dim0, dim1)).to(dtype=torch.bool) return mask.to(dtype=to_dtype).masked_fill(mask, -float("inf")) return torch.randint(0, 2, (dim0, dim1)).to(dtype=to_dtype) def benchmark_multihead_attention( label="", attn_dtype=torch.uint8, key_padding_dtype=torch.uint8, add_bias_kv=False, add_zero_attn=False, static_kv=False, batch_size=20, embedding=EMB, seq_len=SEQ, num_heads=HEADS, ): results = [] # device = torch.device("cuda") xformers_att_config = '{"name": "scaled_dot_product"}' attn_mask = _get_mask(to_dtype=attn_dtype, dim0=seq_len, dim1=seq_len) key_padding_mask = _get_mask( to_dtype=key_padding_dtype, dim0=batch_size, dim1=seq_len ) q = torch.rand(seq_len, batch_size, embedding, requires_grad=True) k = torch.rand(seq_len, batch_size, embedding, requires_grad=True) v = torch.rand(seq_len, batch_size, embedding, requires_grad=True) _reset_seeds() original_mha = MultiheadAttention( embedding, num_heads, dropout=0.0, xformers_att_config=None, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) xformers_mha = MultiheadAttention( embedding, num_heads, dropout=0.0, xformers_att_config=xformers_att_config, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) def original_bench_fw(q, k, v, key_padding_mask, attn_mask, static_kv): original_mha( query=q, key=k, value=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask, static_kv=static_kv, ) def xformers_bench_fw(q, k, v, key_padding_mask, attn_mask, static_kv): xformers_mha( query=q, key=k, value=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask, static_kv=static_kv, ) def original_bench_fw_bw(q, k, v, key_padding_mask, attn_mask, static_kv): output, _ = original_mha( query=q, key=k, value=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask, static_kv=static_kv, ) loss = torch.norm(output) loss.backward() def xformers_bench_fw_bw(q, k, v, key_padding_mask, attn_mask, static_kv): output, _ = xformers_mha( query=q, key=k, value=v, key_padding_mask=key_padding_mask, attn_mask=attn_mask, static_kv=static_kv, ) loss = torch.norm(output) loss.backward() fns = [ original_bench_fw, xformers_bench_fw, original_bench_fw_bw, xformers_bench_fw_bw, ] for fn in fns: results.append( benchmark.Timer( stmt="fn(q, k, v, key_padding_mask, attn_mask, static_kv)", globals={ "q": q, "k": k, "v": v, "key_padding_mask": key_padding_mask, "attn_mask": attn_mask, "static_kv": static_kv, "fn": fn, }, label="multihead fw + bw", sub_label=f"{fn.__name__}", description=label, ).blocked_autorange(min_run_time=1) ) compare = benchmark.Compare(results) compare.print() def run_benchmarks(): for attn_dtype, key_padding_dtype, add_bias_kv, add_zero_attn in itertools.product( ATTN_MASK_DTYPE, KEY_PADDING_MASK_DTYPE, [True, False], [True, False] ): label = f"attn_dtype {attn_dtype}, key_padding_dtype {key_padding_dtype}, \ add_bias_kv {add_bias_kv}, add_zero_attn {add_zero_attn}" 
        benchmark_multihead_attention(
            label=label,
            attn_dtype=attn_dtype,
            key_padding_dtype=key_padding_dtype,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
        )


run_benchmarks()
EXA-1-master
exa/libraries/fairseq/fairseq/benchmark/benchmark_multihead_attention.py
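The timing above is delegated to torch.utils.benchmark rather than hand-rolled loops, which takes care of warmup and autoranging. The same Timer/Compare pattern on a toy matmul, so it can be tried without fairseq or xformers installed; shapes and labels are illustrative.

import torch
from torch.utils import benchmark

x = torch.rand(256, 256)

results = [
    benchmark.Timer(
        stmt="x @ x",
        globals={"x": x},
        label="toy matmul",
        sub_label="256x256",
        description="baseline",
    ).blocked_autorange(min_run_time=0.2)
]
benchmark.Compare(results).print()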