Dataset columns (one row per source file below):
  python_code   string, length 0 to 992k
  repo_name     string, length 8 to 46
  file_path     string, length 5 to 162
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os
import sys

from fairseq.dataclass.initialize import add_defaults, hydra_init
from fairseq_cli.train import main as pre_main
from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig

import hydra
from hydra.core.hydra_config import HydraConfig
import torch
from omegaconf import OmegaConf, open_dict

logger = logging.getLogger("fairseq_cli.hydra_train")


@hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config")
def hydra_main(cfg: FairseqConfig) -> float:
    add_defaults(cfg)

    if cfg.common.reset_logging:
        reset_logging()  # Hydra hijacks logging, fix that
    else:
        with open_dict(cfg):
            # make hydra logging work with ddp (see
            # see https://github.com/facebookresearch/hydra/issues/1126)
            cfg.job_logging_cfg = OmegaConf.to_container(
                HydraConfig.get().job_logging, resolve=True
            )

    cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True, enum_to_str=True))
    OmegaConf.set_struct(cfg, True)

    try:
        if cfg.common.profile:
            with torch.cuda.profiler.profile():
                with torch.autograd.profiler.emit_nvtx():
                    distributed_utils.call_main(cfg, pre_main)
        else:
            distributed_utils.call_main(cfg, pre_main)
    except BaseException as e:
        if not cfg.common.suppress_crashes:
            raise
        else:
            logger.error("Crashed! " + str(e))

    # get best val and return - useful for sweepers
    try:
        best_val = metrics.get_smoothed_value(
            "valid", cfg.checkpoint.best_checkpoint_metric
        )
    except:
        best_val = None

    if best_val is None:
        best_val = float("inf")

    return best_val


def reset_logging():
    root = logging.getLogger()
    for handler in root.handlers:
        root.removeHandler(handler)
    root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        logging.Formatter(
            fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
    )
    root.addHandler(handler)


def cli_main():
    try:
        from hydra._internal.utils import get_args

        cfg_name = get_args().config_name or "config"
    except:
        logger.warning("Failed to get config name from hydra args")
        cfg_name = "config"

    hydra_init(cfg_name)
    hydra_main()


if __name__ == "__main__":
    cli_main()
repo_name: EXA-1-master
file_path: exa/models/unilm-master/decoding/GAD/fairseq_cli/hydra_train.py
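The entry point above is Hydra-driven, so the following is a hedged sketch of launching it programmatically; the override group/key names are assumptions that depend on the configs shipped in fairseq/config and any --config-dir you supply.

# Hedged sketch only: fake the CLI and let @hydra.main parse the overrides.
# The group/key names below are assumptions, not guaranteed to exist in your setup.
import sys
from fairseq_cli import hydra_train

sys.argv = [
    "fairseq-hydra-train",
    "task=translation",             # assumed config group/option
    "task.data=/path/to/data-bin",  # hypothetical path
    "optimization.max_update=1000",
]
hydra_train.cli_main()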
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Data pre-processing: build vocabularies and binarize training data. """ import logging import os import shutil import sys from collections import Counter from itertools import zip_longest from multiprocessing import Pool from fairseq import options, tasks, utils from fairseq.binarizer import Binarizer from fairseq.data import indexed_dataset logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.preprocess") def main(args): utils.import_user_module(args) os.makedirs(args.destdir, exist_ok=True) logger.addHandler( logging.FileHandler( filename=os.path.join(args.destdir, "preprocess.log"), ) ) logger.info(args) task = tasks.get_task(args.task) def train_path(lang): return "{}{}".format(args.trainpref, ("." + lang) if lang else "") def file_name(prefix, lang): fname = prefix if lang is not None: fname += ".{lang}".format(lang=lang) return fname def dest_path(prefix, lang): return os.path.join(args.destdir, file_name(prefix, lang)) def dict_path(lang): return dest_path("dict", lang) + ".txt" def build_dictionary(filenames, src=False, tgt=False): assert src ^ tgt return task.build_dictionary( filenames, workers=args.workers, threshold=args.thresholdsrc if src else args.thresholdtgt, nwords=args.nwordssrc if src else args.nwordstgt, padding_factor=args.padding_factor, ) target = not args.only_source if not args.srcdict and os.path.exists(dict_path(args.source_lang)): raise FileExistsError(dict_path(args.source_lang)) if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)): raise FileExistsError(dict_path(args.target_lang)) if args.joined_dictionary: assert ( not args.srcdict or not args.tgtdict ), "cannot use both --srcdict and --tgtdict with --joined-dictionary" if args.srcdict: src_dict = task.load_dictionary(args.srcdict) elif args.tgtdict: src_dict = task.load_dictionary(args.tgtdict) else: assert ( args.trainpref ), "--trainpref must be set if --srcdict is not specified" src_dict = build_dictionary( {train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True, ) tgt_dict = src_dict else: if args.srcdict: src_dict = task.load_dictionary(args.srcdict) else: assert ( args.trainpref ), "--trainpref must be set if --srcdict is not specified" src_dict = build_dictionary([train_path(args.source_lang)], src=True) if target: if args.tgtdict: tgt_dict = task.load_dictionary(args.tgtdict) else: assert ( args.trainpref ), "--trainpref must be set if --tgtdict is not specified" tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True) else: tgt_dict = None src_dict.save(dict_path(args.source_lang)) if target and tgt_dict is not None: tgt_dict.save(dict_path(args.target_lang)) def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers): logger.info("[{}] Dictionary: {} types".format(lang, len(vocab))) n_seq_tok = [0, 0] replaced = Counter() def merge_result(worker_result): replaced.update(worker_result["replaced"]) n_seq_tok[0] += worker_result["nseq"] n_seq_tok[1] += worker_result["ntok"] input_file = "{}{}".format( input_prefix, ("." 
+ lang) if lang is not None else "" ) offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) pool.apply_async( binarize, ( args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[worker_id + 1], ), callback=merge_result, ) pool.close() ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, lang, "bin"), impl=args.dataset_impl, vocab_size=len(vocab), ) merge_result( Binarizer.binarize( input_file, vocab, lambda t: ds.add_item(t), offset=0, end=offsets[1] ) ) if num_workers > 1: pool.join() for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, lang) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx")) logger.info( "[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format( lang, input_file, n_seq_tok[0], n_seq_tok[1], 100 * sum(replaced.values()) / n_seq_tok[1], vocab.unk_word, ) ) def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers): nseq = [0] def merge_result(worker_result): nseq[0] += worker_result["nseq"] input_file = input_prefix offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) pool.apply_async( binarize_alignments, ( args, input_file, utils.parse_alignment, prefix, offsets[worker_id], offsets[worker_id + 1], ), callback=merge_result, ) pool.close() ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl ) merge_result( Binarizer.binarize_alignments( input_file, utils.parse_alignment, lambda t: ds.add_item(t), offset=0, end=offsets[1], ) ) if num_workers > 1: pool.join() for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, None) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0])) def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1): if args.dataset_impl == "raw": # Copy original text file to destination folder output_text_file = dest_path( output_prefix + ".{}-{}".format(args.source_lang, args.target_lang), lang, ) shutil.copyfile(file_name(input_prefix, lang), output_text_file) else: make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers) def make_all(lang, vocab): if args.trainpref: make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers) if args.validpref: for k, validpref in enumerate(args.validpref.split(",")): outprefix = "valid{}".format(k) if k > 0 else "valid" make_dataset( vocab, validpref, outprefix, lang, num_workers=args.workers ) if args.testpref: for k, testpref in enumerate(args.testpref.split(",")): outprefix = "test{}".format(k) if k > 0 else "test" make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers) def make_all_alignments(): if args.trainpref and os.path.exists(args.trainpref + "." 
+ args.align_suffix): make_binary_alignment_dataset( args.trainpref + "." + args.align_suffix, "train.align", num_workers=args.workers, ) if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix): make_binary_alignment_dataset( args.validpref + "." + args.align_suffix, "valid.align", num_workers=args.workers, ) if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix): make_binary_alignment_dataset( args.testpref + "." + args.align_suffix, "test.align", num_workers=args.workers, ) make_all(args.source_lang, src_dict) if target: make_all(args.target_lang, tgt_dict) if args.align_suffix: make_all_alignments() logger.info("Wrote preprocessed data to {}".format(args.destdir)) if args.alignfile: assert args.trainpref, "--trainpref must be set if --alignfile is specified" src_file_name = train_path(args.source_lang) tgt_file_name = train_path(args.target_lang) freq_map = {} with open(args.alignfile, "r", encoding="utf-8") as align_file: with open(src_file_name, "r", encoding="utf-8") as src_file: with open(tgt_file_name, "r", encoding="utf-8") as tgt_file: for a, s, t in zip_longest(align_file, src_file, tgt_file): si = src_dict.encode_line(s, add_if_not_exist=False) ti = tgt_dict.encode_line(t, add_if_not_exist=False) ai = list(map(lambda x: tuple(x.split("-")), a.split())) for sai, tai in ai: srcidx = si[int(sai)] tgtidx = ti[int(tai)] if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk(): assert srcidx != src_dict.pad() assert srcidx != src_dict.eos() assert tgtidx != tgt_dict.pad() assert tgtidx != tgt_dict.eos() if srcidx not in freq_map: freq_map[srcidx] = {} if tgtidx not in freq_map[srcidx]: freq_map[srcidx][tgtidx] = 1 else: freq_map[srcidx][tgtidx] += 1 align_dict = {} for srcidx in freq_map.keys(): align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get) with open( os.path.join( args.destdir, "alignment.{}-{}.txt".format(args.source_lang, args.target_lang), ), "w", encoding="utf-8", ) as f: for k, v in align_dict.items(): print("{} {}".format(src_dict[k], tgt_dict[v]), file=f) def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True): ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, lang, "bin"), impl=args.dataset_impl, vocab_size=len(vocab), ) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize( filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end ) ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx")) return res def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end): ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl, vocab_size=None, ) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize_alignments( filename, parse_alignment, consumer, offset=offset, end=end ) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) return res def dataset_dest_prefix(args, output_prefix, lang): base = "{}/{}".format(args.destdir, output_prefix) if lang is not None: lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang) elif args.only_source: lang_part = "" else: lang_part = ".{}-{}".format(args.source_lang, args.target_lang) return "{}{}".format(base, lang_part) def dataset_dest_file(args, output_prefix, lang, extension): base = dataset_dest_prefix(args, output_prefix, lang) return "{}.{}".format(base, extension) def get_offsets(input_file, num_workers): return Binarizer.find_offsets(input_file, num_workers) def 
cli_main(): parser = options.get_preprocessing_parser() args = parser.parse_args() main(args) if __name__ == "__main__": cli_main()
repo_name: EXA-1-master
file_path: exa/models/unilm-master/decoding/GAD/fairseq_cli/preprocess.py
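A hedged usage sketch for the binarizer above; the language pair and paths are hypothetical, and the flags mirror the argparse options behind options.get_preprocessing_parser().

# Minimal sketch, assuming parallel text at data/train.de|en and data/valid.de|en.
import sys
from fairseq_cli import preprocess

sys.argv = [
    "fairseq-preprocess",
    "--source-lang", "de",
    "--target-lang", "en",
    "--trainpref", "data/train",
    "--validpref", "data/valid",
    "--destdir", "data-bin/de-en",
    "--workers", "8",
    "--joined-dictionary",   # share one dictionary across source and target
]
preprocess.cli_main()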
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import ast import logging import math import os import sys from argparse import Namespace from itertools import chain import numpy as np import torch from fairseq import checkpoint_utils, options, scoring, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter from omegaconf import DictConfig def main(cfg: DictConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) assert cfg.common_eval.path is not None, "--path required for generation!" assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw" ), "--replace-unk requires a raw text dataset (--dataset-impl=raw)" if cfg.common_eval.results_path is not None: os.makedirs(cfg.common_eval.results_path, exist_ok=True) output_path = os.path.join( cfg.common_eval.results_path, "generate-{}.txt".format(cfg.dataset.gen_subset), ) with open(output_path, "w", buffering=1, encoding="utf-8") as h: return _main(cfg, h) else: return _main(cfg, sys.stdout) def get_symbols_to_strip_from_output(generator): if hasattr(generator, "symbols_to_strip_from_output"): return generator.symbols_to_strip_from_output else: return {generator.eos} def _main(cfg: DictConfig, output_file): logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=output_file, ) logger = logging.getLogger("fairseq_cli.generate") utils.import_user_module(cfg.common) if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.max_tokens = 12000 logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Load dataset splits task = tasks.setup_task(cfg.task) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary overrides = ast.literal_eval(cfg.common_eval.model_overrides) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task) if cfg.generation.lm_path is not None: overrides["data"] = cfg.task.data try: lms, _ = checkpoint_utils.load_model_ensemble( [cfg.generation.lm_path], arg_overrides=overrides, task=None ) except: logger.warning( f"Failed to load language model! 
Please make sure that the language model dict is the same " f"as target dict and is located in the data dir ({cfg.task.data})" ) raise assert len(lms) == 1 else: lms = [None] # Optimize ensemble for generation for model in chain(models, lms): if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(cfg.dataset.gen_subset), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models] ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=cfg.distributed_training.distributed_world_size, shard_id=cfg.distributed_training.distributed_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) # Initialize generator gen_timer = StopwatchMeter() extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight} generator = task.build_generator( models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x scorer = scoring.build_scorer(cfg.scoring, tgt_dict) num_sentences = 0 has_target = True wps_meter = TimeMeter() for sample in progress: sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if cfg.generation.prefix_size > 0: prefix_tokens = sample["target"][:, : cfg.generation.prefix_size] constraints = None if "constraints" in sample: constraints = sample["constraints"] gen_timer.start() hypos = task.inference_step( generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints, ) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): has_target = sample["target"] is not None # Remove padding if "src_tokens" in sample["net_input"]: src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) else: src_tokens = None target_tokens = None if has_target: target_tokens = ( utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu() ) # Either retrieve the original sentences or regenerate them from tokens. 
if align_dict is not None: src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text( sample_id ) target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) else: src_str = "" if has_target: target_str = tgt_dict.string( target_tokens, cfg.common_eval.post_process, escape_unk=True, extra_symbols_to_ignore=get_symbols_to_strip_from_output( generator ), ) src_str = decode_fn(src_str) if has_target: target_str = decode_fn(target_str) if not cfg.common_eval.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str), file=output_file) if has_target: print("T-{}\t{}".format(sample_id, target_str), file=output_file) # Process top predictions for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) if not cfg.common_eval.quiet: score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print( "H-{}\t{}\t{}".format(sample_id, score, hypo_str), file=output_file, ) # detokenized hypothesis print( "D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str), file=output_file, ) print( "P-{}\t{}".format( sample_id, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"] .div_(math.log(2)) .tolist(), ) ), ), file=output_file, ) if cfg.generation.print_alignment == "hard": print( "A-{}\t{}".format( sample_id, " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment ] ), ), file=output_file, ) if cfg.generation.print_alignment == "soft": print( "A-{}\t{}".format( sample_id, " ".join( [ ",".join(src_probs) for src_probs in alignment ] ), ), file=output_file, ) if cfg.generation.print_step: print( "I-{}\t{}".format(sample_id, hypo["steps"]), file=output_file, ) if cfg.generation.retain_iter_history: for step, h in enumerate(hypo["history"]): _, h_str, _ = utils.post_process_prediction( hypo_tokens=h["tokens"].int().cpu(), src_str=src_str, alignment=None, align_dict=None, tgt_dict=tgt_dict, remove_bpe=None, ) print( "E-{}_{}\t{}".format(sample_id, step, h_str), file=output_file, ) # Score only the top hypothesis if has_target and j == 0: if align_dict is not None or cfg.common_eval.post_process is not None: # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tgt_dict.encode_line( target_str, add_if_not_exist=True ) hypo_tokens = tgt_dict.encode_line( detok_hypo_str, add_if_not_exist=True ) if hasattr(scorer, "add_string"): scorer.add_string(target_str, detok_hypo_str) else: scorer.add(target_tokens, hypo_tokens) wps_meter.update(num_generated_tokens) progress.log({"wps": round(wps_meter.avg)}) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info( "Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) if has_target: if cfg.bpe and not cfg.generation.sacrebleu: if cfg.common_eval.post_process: 
logger.warning( "BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization" ) else: logger.warning( "If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization" ) # use print to be consistent with other main outputs: S-, H-, T-, D- and so on print( "Generate {} with beam={}: {}".format( cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string() ), file=output_file, ) return scorer def cli_main(): parser = options.get_generation_parser() args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
repo_name: EXA-1-master
file_path: exa/models/unilm-master/decoding/GAD/fairseq_cli/generate.py
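A hedged sketch of batch decoding with the script above; the checkpoint and data-bin paths are hypothetical.

# Minimal sketch: decode the binarized test split with a trained checkpoint.
import sys
from fairseq_cli import generate

sys.argv = [
    "fairseq-generate",
    "data-bin/de-en",                           # positional data directory
    "--path", "checkpoints/checkpoint_best.pt",
    "--gen-subset", "test",
    "--beam", "5",
    "--batch-size", "64",
    "--remove-bpe",                             # ends up in cfg.common_eval.post_process
]
generate.cli_main()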
python_code: (empty file)
repo_name: EXA-1-master
file_path: exa/models/unilm-master/decoding/GAD/fairseq_cli/__init__.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys from argparse import Namespace from itertools import chain import torch from fairseq import checkpoint_utils, distributed_utils, options, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import metrics, progress_bar from omegaconf import DictConfig logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.validate") def main(cfg: DictConfig, override_args=None): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_world_size > 1: data_parallel_world_size = distributed_utils.get_data_parallel_world_size() data_parallel_rank = distributed_utils.get_data_parallel_rank() else: data_parallel_world_size = 1 data_parallel_rank = 0 if override_args is not None: overrides = vars(override_args) overrides.update(eval(getattr(override_args, "model_overrides", "{}"))) else: overrides = None # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=overrides, suffix=cfg.checkpoint.checkpoint_suffix, ) model = models[0] # Move models to GPU for model in models: if use_fp16: model.half() if use_cuda: model.cuda() # Print args logger.info(saved_cfg) # Build criterion criterion = task.build_criterion(saved_cfg.criterion) criterion.eval() for subset in cfg.dataset.valid_subset.split(","): try: task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task) dataset = task.dataset(subset) except KeyError: raise Exception("Cannot find dataset: " + subset) # Initialize data iterator itr = task.get_batch_iterator( dataset=dataset, max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models], ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=data_parallel_world_size, shard_id=data_parallel_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, prefix=f"valid on '{subset}' subset", default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) log_outputs = [] for i, sample in enumerate(progress): sample = utils.move_to_cuda(sample) if use_cuda else sample _loss, _sample_size, log_output = task.valid_step(sample, model, criterion) progress.log(log_output, step=i) log_outputs.append(log_output) if data_parallel_world_size > 1: log_outputs = distributed_utils.all_gather_list( log_outputs, 
max_size=cfg.common.all_gather_list_size, group=distributed_utils.get_data_parallel_group(), ) log_outputs = list(chain.from_iterable(log_outputs)) with metrics.aggregate() as agg: task.reduce_metrics(log_outputs, criterion) log_output = agg.get_smoothed_values() progress.print(log_output, tag=subset, step=i) def cli_main(): parser = options.get_validation_parser() args = options.parse_args_and_arch(parser) # only override args that are explicitly given on the command line override_parser = options.get_validation_parser() override_args = options.parse_args_and_arch( override_parser, suppress_defaults=True ) distributed_utils.call_main( convert_namespace_to_omegaconf(args), main, override_args=override_args ) if __name__ == "__main__": cli_main()
repo_name: EXA-1-master
file_path: exa/models/unilm-master/decoding/GAD/fairseq_cli/validate.py
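A hedged sketch of running standalone validation with the script above; the paths are hypothetical.

# Minimal sketch: compute validation loss for a saved checkpoint.
import sys
from fairseq_cli import validate

sys.argv = [
    "fairseq-validate",
    "data-bin/de-en",
    "--path", "checkpoints/checkpoint_best.pt",
    "--valid-subset", "valid",
    "--max-tokens", "4096",
]
validate.cli_main()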
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate raw text with a trained model. Batches data on-the-fly. """ import ast import fileinput import logging import math import os import sys import time from argparse import Namespace from collections import namedtuple import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from fairseq_cli.generate import get_symbols_to_strip_from_output logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.interactive") Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints") Translation = namedtuple("Translation", "src_str hypos pos_scores alignments") def buffered_read(input, buffer_size): buffer = [] with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h: for src_str in h: buffer.append(src_str.strip()) if len(buffer) >= buffer_size: yield buffer buffer = [] if len(buffer) > 0: yield buffer def make_batches(lines, cfg, task, max_positions, encode_fn): def encode_fn_target(x): return encode_fn(x) if cfg.generation.constraints: # Strip (tab-delimited) contraints, if present, from input lines, # store them in batch_constraints batch_constraints = [list() for _ in lines] for i, line in enumerate(lines): if "\t" in line: lines[i], *batch_constraints[i] = line.split("\t") # Convert each List[str] to List[Tensor] for i, constraint_list in enumerate(batch_constraints): batch_constraints[i] = [ task.target_dictionary.encode_line( encode_fn_target(constraint), append_eos=False, add_if_not_exist=False, ) for constraint in constraint_list ] if cfg.generation.constraints: constraints_tensor = pack_constraints(batch_constraints) else: constraints_tensor = None tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn) itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference( tokens, lengths, constraints=constraints_tensor ), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, ).next_epoch_itr(shuffle=False) for batch in itr: ids = batch["id"] src_tokens = batch["net_input"]["src_tokens"] src_lengths = batch["net_input"]["src_lengths"] constraints = batch.get("constraints", None) yield Batch( ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints, ) def main(cfg: FairseqConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) start_time = time.time() total_translate_time = 0 utils.import_user_module(cfg.common) if cfg.interactive.buffer_size < 1: cfg.interactive.buffer_size = 1 if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.batch_size = 1 assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( not cfg.dataset.batch_size or cfg.dataset.batch_size <= cfg.interactive.buffer_size ), "--batch-size cannot be larger than 
--buffer-size" logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Setup task, e.g., translation task = tasks.setup_task(cfg.task) # Load ensemble overrides = ast.literal_eval(cfg.common_eval.model_overrides) logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _model_args = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Initialize generator generator = task.build_generator(models, cfg.generation) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if cfg.generation.constraints: logger.warning( "NOTE: Constrained decoding currently assumes a shared subword vocabulary." 
) if cfg.interactive.buffer_size > 1: logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info("Type the input sentence and press return:") start_id = 0 for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size): results = [] for batch in make_batches(inputs, cfg, task, max_positions, encode_fn): bsz = batch.src_tokens.size(0) src_tokens = batch.src_tokens src_lengths = batch.src_lengths constraints = batch.constraints if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() if constraints is not None: constraints = constraints.cuda() sample = { "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, }, } translate_start_time = time.time() translations = task.inference_step( generator, models, sample, constraints=constraints ) translate_time = time.time() - translate_start_time total_translate_time += translate_time list_constraints = [[] for _ in range(bsz)] if cfg.generation.constraints: list_constraints = [unpack_constraints(c) for c in constraints] for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) constraints = list_constraints[i] results.append( ( start_id + id, src_tokens_i, hypos, { "constraints": constraints, "time": translate_time / len(translations), }, ) ) # sort output to match input order for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]): src_str = '' if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) print("S-{}\t{}".format(id_, src_str)) print("W-{}\t{:.3f}\tseconds".format(id_, info["time"])) for constraint in info["constraints"]: print( "C-{}\t{}".format( id_, tgt_dict.string(constraint, cfg.common_eval.post_process) ) ) # Process top predictions for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print("H-{}\t{}\t{}".format(id_, score, hypo_str)) # detokenized hypothesis print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str)) print( "P-{}\t{}".format( id_, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"].div_(math.log(2)).tolist(), ) ), ) ) if cfg.generation.print_alignment: alignment_str = " ".join( ["{}-{}".format(src, tgt) for src, tgt in alignment] ) print("A-{}\t{}".format(id_, alignment_str)) # update running id_ counter start_id += len(inputs) logger.info( "Total time: {:.3f} seconds; translation time: {:.3f}".format( time.time() - start_time, total_translate_time ) ) def cli_main(): parser = options.get_interactive_generation_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main) if __name__ == "__main__": cli_main()
repo_name: EXA-1-master
file_path: exa/models/unilm-master/decoding/GAD/fairseq_cli/interactive.py
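A hedged sketch of raw-text translation with the script above; the model directory and input file are hypothetical.

# Minimal sketch: translate raw text, batching cfg.interactive.buffer_size lines at a time.
import sys
from fairseq_cli import interactive

sys.argv = [
    "fairseq-interactive",
    "data-bin/de-en",
    "--path", "checkpoints/checkpoint_best.pt",
    "--beam", "5",
    "--buffer-size", "16",
    "--batch-size", "16",
    "--input", "sentences.de",   # omit (defaults to "-") to read from stdin
]
interactive.cli_main()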
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a new model on one or across multiple GPUs. """ import argparse import logging import math import os import sys from typing import Dict, Optional, Any, List, Tuple, Callable import numpy as np import torch from fairseq import ( checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils, ) from fairseq.data import iterators from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed_utils import is_master from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer from omegaconf import DictConfig, OmegaConf logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.train") def main(cfg: FairseqConfig) -> None: if isinstance(cfg, argparse.Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) if is_master(cfg.distributed_training) and "job_logging_cfg" in cfg: # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg)) assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" metrics.reset() np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) if distributed_utils.is_master(cfg.distributed_training): checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir) # Print args logger.info(cfg) # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(cfg.task) # Load valid dataset (we load training data below, based on the latest checkpoint) for valid_sub_split in cfg.dataset.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) assert cfg.criterion, "Please specify criterion to train a model" # Build model and criterion model = task.build_model(cfg.model) criterion = task.build_criterion(cfg.criterion) logger.info(model) logger.info("task: {}".format(task.__class__.__name__)) logger.info("model: {}".format(model.__class__.__name__)) logger.info("criterion: {}".format(criterion.__class__.__name__)) logger.info( "num. model params: {:,} (num. 
trained: {:,})".format( sum(p.numel() for p in model.parameters()), sum(p.numel() for p in model.parameters() if p.requires_grad), ) ) # (optionally) Configure quantization if cfg.common.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=cfg.common.quantization_config_path, max_epoch=cfg.optimization.max_epoch, max_update=cfg.optimization.max_update, ) else: quantizer = None # Build trainer if cfg.common.model_parallel_size == 1: trainer = Trainer(cfg, task, model, criterion, quantizer) else: trainer = MegatronTrainer(cfg, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format( cfg.distributed_training.distributed_world_size ) ) logger.info( "max tokens per GPU = {} and batch size per GPU = {}".format( cfg.dataset.max_tokens, cfg.dataset.batch_size, ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( cfg.checkpoint, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) max_epoch = cfg.optimization.max_epoch or math.inf lr = trainer.get_lr() train_meter = meters.StopwatchMeter() train_meter.start() while epoch_itr.next_epoch_idx <= max_epoch: if lr <= cfg.optimization.stop_min_lr: logger.info( f"stopping training because current learning rate ({lr}) is smaller " "than or equal to minimum learning rate " f"(--stop-min-lr={cfg.optimization.stop_min_lr})" ) break # train for one epoch valid_losses, should_stop = train(cfg, trainer, task, epoch_itr) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool: # skip check if no validation was done in the current epoch if valid_loss is None: return False if cfg.checkpoint.patience <= 0: return False def is_better(a, b): return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b prev_best = getattr(should_stop_early, "best", None) if prev_best is None or is_better(valid_loss, prev_best): should_stop_early.best = valid_loss should_stop_early.num_runs = 0 return False else: should_stop_early.num_runs += 1 if should_stop_early.num_runs >= cfg.checkpoint.patience: logger.info( "early stop since valid performance hasn't improved for last {} runs".format( cfg.checkpoint.patience ) ) return True else: return False @metrics.aggregate("train") def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]: """Train the model for one epoch and return validation losses.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum), ) update_freq = ( cfg.optimization.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(cfg.optimization.update_freq) else cfg.optimization.update_freq[-1] ) itr = iterators.GroupedIterator(itr, update_freq) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, 
log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), azureml_logging=( cfg.common.azureml_logging if distributed_utils.is_master(cfg.distributed_training) else False ), ) progress.update_config(_flatten_config(cfg)) trainer.begin_epoch(epoch_itr.epoch) valid_subsets = cfg.dataset.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() logger.info("Start iterating over samples") for i, samples in enumerate(progress): with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function( "train_step-%d" % i ): log_output = trainer.train_step(samples) if log_output is not None: # not OOM, overflow, ... # log mid-epoch stats num_updates = trainer.get_num_updates() if num_updates % cfg.common.log_interval == 0: stats = get_training_stats(metrics.get_smoothed_values("train_inner")) progress.log(stats, tag="train_inner", step=num_updates) # reset mid-epoch stats after each log interval # the end-of-epoch stats will still be preserved metrics.reset_meters("train_inner") end_of_epoch = not itr.has_next() valid_losses, should_stop = validate_and_save( cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch ) if should_stop: break # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, should_stop def _flatten_config(cfg: DictConfig): config = OmegaConf.to_container(cfg) # remove any legacy Namespaces and replace with a single "args" namespace = None for k, v in list(config.items()): if isinstance(v, argparse.Namespace): namespace = v del config[k] if namespace is not None: config["args"] = vars(namespace) return config def validate_and_save( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool, ) -> Tuple[List[Optional[float]], bool]: num_updates = trainer.get_num_updates() max_update = cfg.optimization.max_update or math.inf # Stopping conditions (and an additional one based on validation loss later # on) should_stop = False if num_updates >= max_update: should_stop = True logger.info( f"Stopping training due to " f"num_updates: {num_updates} >= max_update: {max_update}" ) training_time_hours = trainer.cumulative_training_time() / (60 * 60) if ( cfg.optimization.stop_time_hours > 0 and training_time_hours > cfg.optimization.stop_time_hours ): should_stop = True logger.info( f"Stopping training due to " f"cumulative_training_time: {training_time_hours} > " f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)" ) do_save = ( (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0) or should_stop or ( cfg.checkpoint.save_interval_updates > 0 and num_updates > 0 and num_updates % cfg.checkpoint.save_interval_updates == 0 and num_updates >= cfg.dataset.validate_after_updates ) ) do_validate = ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % 
cfg.dataset.validate_interval == 0) or should_stop or ( cfg.dataset.validate_interval_updates > 0 and num_updates > 0 and num_updates % cfg.dataset.validate_interval_updates == 0 ) ) and not cfg.dataset.disable_validation # Validate valid_losses = [None] if do_validate: valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets) should_stop |= should_stop_early(cfg, valid_losses[0]) # Save checkpoint if do_save or should_stop: checkpoint_utils.save_checkpoint( cfg.checkpoint, trainer, epoch_itr, valid_losses[0] ) return valid_losses, should_stop def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]: stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def validate( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, subsets: List[str], ) -> List[Optional[float]]: """Evaluate the model on the validation set(s) and return the losses.""" if cfg.dataset.fixed_validation_seed is not None: # set fixed seed for every validation utils.set_torch_seed(cfg.dataset.fixed_validation_seed) trainer.begin_valid_epoch(epoch_itr.epoch) valid_losses = [] for subset in subsets: logger.info('begin validation on "{}" subset'.format(subset)) # Initialize data iterator itr = trainer.get_valid_iterator(subset).next_epoch_itr( shuffle=False, set_dataset_epoch=False # use a fixed valid set ) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, prefix=f"valid on '{subset}' subset", tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), ) # create a new root metrics aggregator so validation metrics # don't pollute other aggregators (e.g., train meters) with metrics.aggregate(new_root=True) as agg: for sample in progress: trainer.valid_step(sample) # log validation stats stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values()) progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric]) return valid_losses def get_valid_stats( cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any] ) -> Dict[str, Any]: stats["num_updates"] = trainer.get_num_updates() if hasattr(checkpoint_utils.save_checkpoint, "best"): key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric) best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min stats[key] = best_function( checkpoint_utils.save_checkpoint.best, stats[cfg.checkpoint.best_checkpoint_metric], ) return stats def cli_main( modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None ) -> None: parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) cfg = convert_namespace_to_omegaconf(args) if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: distributed_utils.call_main(cfg, main) if __name__ == "__main__": cli_main()
repo_name: EXA-1-master
file_path: exa/models/unilm-master/decoding/GAD/fairseq_cli/train.py
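A hedged sketch of a training run driven by the script above; the architecture and hyperparameters are illustrative, not a recommended recipe.

# Minimal sketch: train a translation model on the binarized data.
import sys
from fairseq_cli import train

sys.argv = [
    "fairseq-train",
    "data-bin/de-en",
    "--task", "translation",
    "--arch", "transformer",
    "--optimizer", "adam",
    "--lr", "5e-4",
    "--lr-scheduler", "inverse_sqrt",
    "--warmup-updates", "4000",
    "--criterion", "label_smoothed_cross_entropy",
    "--max-tokens", "4096",
    "--save-dir", "checkpoints",
]
train.cli_main()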
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Evaluate the perplexity of a trained language model. """ import logging import math import os import sys from argparse import Namespace from typing import Iterable, List, Optional import torch import fairseq from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter from fairseq.sequence_scorer import SequenceScorer from omegaconf import DictConfig logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.eval_lm") def eval_lm( models: List[fairseq.models.FairseqModel], source_dictionary: fairseq.data.Dictionary, batch_iterator: Iterable, post_process: Optional[str] = None, output_word_probs: bool = False, output_word_stats: bool = False, target_dictionary: Optional[fairseq.data.Dictionary] = None, softmax_batch: int = False, remove_bos_token: bool = False, device: Optional[torch.device] = None, ): """ Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputing word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. 
output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter) """ if target_dictionary is None: target_dictionary = source_dictionary if device is None: device = next(models[0].parameters()).device gen_timer = StopwatchMeter() scorer = SequenceScorer(target_dictionary, softmax_batch) score_sum = 0.0 count = 0 if post_process is not None: if post_process in {"subword_nmt", "@@ "}: bpe_cont = post_process.rstrip() bpe_toks = { i for i in range(len(source_dictionary)) if source_dictionary[i].endswith(bpe_cont) } else: raise NotImplementedError( "--post-process={post_process} is not implemented" ) bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() for sample in batch_iterator: if "net_input" not in sample: continue sample = utils.move_to_cuda(sample, device=device) gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample["ntokens"]) for i, hypos_i in enumerate(hypos): hypo = hypos_i[0] sample_id = sample["id"][i] tokens = hypo["tokens"] tgt_len = tokens.numel() pos_scores = hypo["positional_scores"].float() if remove_bos_token: assert hypo["tokens"][0].item() == target_dictionary.bos() tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if bpe_toks is not None: for i in range(tgt_len - 1): if tokens[i].item() in bpe_toks: skipped_toks += 1 pos_scores[i + 1] += pos_scores[i] pos_scores[i] = 0 inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf")) if inf_scores.any(): logger.info( "skipping tokens with inf scores:", target_dictionary.string(tokens[inf_scores.nonzero()]), ) pos_scores = pos_scores[(~inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += pos_scores.numel() - skipped_toks if output_word_probs or output_word_stats: w = "" word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += source_dictionary[w_ind] if bpe_toks is not None and w_ind in bpe_toks: w = w[:-bpe_len] is_bpe = True else: word_prob.append((w, pos_scores[i].item())) next_prob = None ind = i + 1 while ind < len(tokens): if pos_scores[ind].item() != 0: next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add( pos_scores[i].item(), next_prob ) is_bpe = False w = "" if output_word_probs: logger.info( str(int(sample_id)) + " " + ( "\t".join( "{} [{:2f}]".format(x[0], x[1]) for x in word_prob ) ) ) avg_nll_loss = ( -score_sum / count / math.log(2) if count > 0 else 0 ) # convert to base 2 logger.info( "Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format( gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0 ) ) if output_word_stats: for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True): logger.info(ws) return { "loss": avg_nll_loss, "perplexity": 2 ** avg_nll_loss, } class WordStat(object): def __init__(self, word, is_bpe): self.word = word self.is_bpe = is_bpe 
self.log_prob = 0 self.next_word_prob = 0 self.count = 0 self.missing_next_words = 0 def add(self, log_prob, next_word_prob): """increments counters for the sum of log probs of current word and next word (given context ending at current word). Since the next word might be at the end of the example, or it might be not counted because it is not an ending subword unit, also keeps track of how many of those we have seen""" if next_word_prob is not None: self.next_word_prob += next_word_prob else: self.missing_next_words += 1 self.log_prob += log_prob self.count += 1 def __str__(self): return "{}\t{}\t{}\t{}\t{}\t{}".format( self.word, self.count, self.log_prob, self.is_bpe, self.next_word_prob, self.count - self.missing_next_words, ) def main(cfg: DictConfig, **unused_kwargs): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) logger.info(cfg) if cfg.eval_lm.context_window > 0: # reduce tokens per sample by the required context window size cfg.task.tokens_per_sample -= cfg.eval_lm.context_window # Initialize the task using the current *cfg* task = tasks.setup_task(cfg.task) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, model_args, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=eval(cfg.common_eval.model_overrides), suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, task=task, ) use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) # Optimize ensemble for generation and set the source and dest dicts on the model # (required by scorer) for model in models: if use_fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) assert len(models) > 0 logger.info( "num. 
model params: {:,}".format(sum(p.numel() for p in models[0].parameters())) ) # Load dataset splits task.load_dataset(cfg.dataset.gen_subset) dataset = task.dataset(cfg.dataset.gen_subset) logger.info( "{} {} {:,} examples".format( cfg.task.data, cfg.dataset.gen_subset, len(dataset) ) ) itr = task.eval_lm_dataloader( dataset=dataset, max_tokens=cfg.dataset.max_tokens or 36000, batch_size=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( *[model.max_positions() for model in models] ), num_shards=max( cfg.dataset.num_shards, cfg.distributed_training.distributed_world_size, ), shard_id=max( cfg.dataset.shard_id, cfg.distributed_training.distributed_rank, ), num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, context_window=cfg.eval_lm.context_window, ) itr = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) results = eval_lm( models=models, source_dictionary=task.source_dictionary, batch_iterator=itr, post_process=cfg.common_eval.post_process, output_word_probs=cfg.eval_lm.output_word_probs, output_word_stats=cfg.eval_lm.output_word_stats, target_dictionary=task.target_dictionary, softmax_batch=cfg.eval_lm.softmax_batch, remove_bos_token=getattr(cfg.task, "add_bos_token", False), ) logger.info( "Loss (base 2): {:.4f}, Perplexity: {:.2f}".format( results["loss"], results["perplexity"] ) ) return results def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq_cli/eval_lm.py
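A minimal sketch of the loss/perplexity arithmetic at the end of eval_lm above; the per-token scores below are made up, and the real function also subtracts skipped BPE continuation tokens from the count.

import math

pos_scores = [-2.1, -0.3, -1.7, -0.9]   # hypothetical per-token log-probs (natural log)
score_sum = sum(pos_scores)
count = len(pos_scores)                  # eval_lm subtracts skipped BPE tokens here

avg_nll_loss = -score_sum / count / math.log(2)   # nats per token -> bits per token
perplexity = 2 ** avg_nll_loss
print("Loss (base 2): {:.4f}, Perplexity: {:.2f}".format(avg_nll_loss, perplexity))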
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BLEU scoring of generated translations against reference translations. """ import argparse import os import sys from fairseq.data import dictionary from fairseq.scoring import bleu def get_parser(): parser = argparse.ArgumentParser( description="Command-line script for BLEU scoring." ) # fmt: off parser.add_argument('-s', '--sys', default='-', help='system output') parser.add_argument('-r', '--ref', required=True, help='references') parser.add_argument('-o', '--order', default=4, metavar='N', type=int, help='consider ngrams up to this order') parser.add_argument('--ignore-case', action='store_true', help='case-insensitive scoring') parser.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') parser.add_argument('--sentence-bleu', action='store_true', help='report sentence-level BLEUs (i.e., with +1 smoothing)') # fmt: on return parser def cli_main(): parser = get_parser() args = parser.parse_args() print(args) assert args.sys == "-" or os.path.exists( args.sys ), "System output file {} does not exist".format(args.sys) assert os.path.exists(args.ref), "Reference file {} does not exist".format(args.ref) dict = dictionary.Dictionary() def readlines(fd): for line in fd.readlines(): if args.ignore_case: yield line.lower() else: yield line if args.sacrebleu: import sacrebleu def score(fdsys): with open(args.ref) as fdref: print(sacrebleu.corpus_bleu(fdsys, [fdref]).format()) elif args.sentence_bleu: def score(fdsys): with open(args.ref) as fdref: scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) for i, (sys_tok, ref_tok) in enumerate( zip(readlines(fdsys), readlines(fdref)) ): scorer.reset(one_init=True) sys_tok = dict.encode_line(sys_tok) ref_tok = dict.encode_line(ref_tok) scorer.add(ref_tok, sys_tok) print(i, scorer.result_string(args.order)) else: def score(fdsys): with open(args.ref) as fdref: scorer = bleu.Scorer( bleu.BleuConfig( pad=dict.pad(), eos=dict.eos(), unk=dict.unk(), ) ) for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)): sys_tok = dict.encode_line(sys_tok) ref_tok = dict.encode_line(ref_tok) scorer.add(ref_tok, sys_tok) print(scorer.result_string(args.order)) if args.sys == "-": score(sys.stdin) else: with open(args.sys, "r") as f: score(f) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq_cli/score.py
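A hedged sketch of the --sacrebleu path in score.py above, using in-memory lists instead of the --sys/--ref files; the sentences are invented.

import sacrebleu

sys_lines = ["the cat sat on the mat"]
ref_lines = ["the cat is on the mat"]

# corpus_bleu takes the system outputs and a list of reference streams,
# mirroring sacrebleu.corpus_bleu(fdsys, [fdref]) in the script.
bleu = sacrebleu.corpus_bleu(sys_lines, [ref_lines])
print(bleu.format())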
#!/usr/bin/env python """Helper script to compare two argparse.Namespace objects.""" from argparse import Namespace # noqa def main(): ns1 = eval(input("Namespace 1: ")) ns2 = eval(input("Namespace 2: ")) def keys(ns): ks = set() for k in dir(ns): if not k.startswith("_"): ks.add(k) return ks k1 = keys(ns1) k2 = keys(ns2) def print_keys(ks, ns1, ns2=None): for k in ks: if ns2 is None: print("{}\t{}".format(k, getattr(ns1, k, None))) else: print( "{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None)) ) print("Keys unique to namespace 1:") print_keys(k1 - k2, ns1) print() print("Keys unique to namespace 2:") print_keys(k2 - k1, ns2) print() print("Overlapping keys with different values:") ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")] print_keys(ks, ns1, ns2) print() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/compare_namespaces.py
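For illustration only (the field names and values below are made up), this is the key-diffing that compare_namespaces.py performs after eval()-ing the two pasted reprs.

from argparse import Namespace

ns1 = Namespace(lr=0.0005, max_epoch=50)
ns2 = Namespace(lr=0.0003, max_epoch=50, warmup_updates=4000)

k1 = {k for k in dir(ns1) if not k.startswith("_")}
k2 = {k for k in dir(ns2) if not k.startswith("_")}

print(k1 - k2)                                                      # keys unique to namespace 1
print(k2 - k1)                                                      # keys unique to namespace 2 -> {'warmup_updates'}
print({k for k in k1 & k2 if getattr(ns1, k) != getattr(ns2, k)})   # differing values -> {'lr'}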
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Split a large file into a train and valid set while respecting document boundaries. Documents should be separated by a single empty line. """ import argparse import random import sys def main(): parser = argparse.ArgumentParser() parser.add_argument("input") parser.add_argument("sample_output", help="train output file") parser.add_argument("remainder_output", help="valid output file") parser.add_argument("-k", type=int, help="remainder size") parser.add_argument( "--lines", action="store_true", help="split lines instead of docs" ) args = parser.parse_args() assert args.k is not None sample = [] remainder = [] num_docs = [0] def update_sample(doc): if len(sample) < args.k: sample.append(doc.copy()) else: i = num_docs[0] j = random.randrange(i + 1) if j < args.k: remainder.append(sample[j]) sample[j] = doc.copy() else: remainder.append(doc.copy()) num_docs[0] += 1 doc.clear() with open(args.input, "r", encoding="utf-8") as h: doc = [] for i, line in enumerate(h): if line.strip() == "": # empty line indicates new document update_sample(doc) else: doc.append(line) if args.lines: update_sample(doc) if i % 1000000 == 0: print(i, file=sys.stderr, end="", flush=True) elif i % 100000 == 0: print(".", file=sys.stderr, end="", flush=True) if len(doc) > 0: update_sample(doc) print(file=sys.stderr, flush=True) assert len(sample) == args.k with open(args.sample_output, "w", encoding="utf-8") as out: first = True for doc in sample: if not first and not args.lines: out.write("\n") first = False for line in doc: out.write(line) with open(args.remainder_output, "w", encoding="utf-8") as out: first = True for doc in remainder: if not first and not args.lines: out.write("\n") first = False for line in doc: out.write(line) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/split_train_valid_docs.py
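A condensed sketch of the reservoir-sampling rule inside update_sample above, with integers standing in for documents and an arbitrary k=2; the real script additionally respects document boundaries and the --lines flag.

import random

k = 2
sample, remainder, seen = [], [], 0
for doc in range(10):                    # toy stand-ins for documents
    if len(sample) < k:
        sample.append(doc)
    else:
        j = random.randrange(seen + 1)   # seen == number of docs processed so far
        if j < k:
            remainder.append(sample[j])  # evict a kept doc with probability k / (seen + 1)
            sample[j] = doc
        else:
            remainder.append(doc)
    seen += 1

print(sample)      # exactly k docs, each retained with equal probability
print(remainder)   # the rest, in the order they were pushed out or skipped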
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Use this script in order to build symmetric alignments for your translation dataset. This script depends on fast_align and mosesdecoder tools. You will need to build those before running the script. fast_align: github: http://github.com/clab/fast_align instructions: follow the instructions in README.md mosesdecoder: github: http://github.com/moses-smt/mosesdecoder instructions: http://www.statmt.org/moses/?n=Development.GetStarted The script produces the following files under --output_dir: text.joined - concatenation of lines from the source_file and the target_file. align.forward - forward pass of fast_align. align.backward - backward pass of fast_align. aligned.sym_heuristic - symmetrized alignment. """ import argparse import os from itertools import zip_longest def main(): parser = argparse.ArgumentParser(description="symmetric alignment builer") # fmt: off parser.add_argument('--fast_align_dir', help='path to fast_align build directory') parser.add_argument('--mosesdecoder_dir', help='path to mosesdecoder root directory') parser.add_argument('--sym_heuristic', help='heuristic to use for symmetrization', default='grow-diag-final-and') parser.add_argument('--source_file', help='path to a file with sentences ' 'in the source language') parser.add_argument('--target_file', help='path to a file with sentences ' 'in the target language') parser.add_argument('--output_dir', help='output directory') # fmt: on args = parser.parse_args() fast_align_bin = os.path.join(args.fast_align_dir, "fast_align") symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal") sym_fast_align_bin = os.path.join( args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl" ) # create joined file joined_file = os.path.join(args.output_dir, "text.joined") with open(args.source_file, "r", encoding="utf-8") as src, open( args.target_file, "r", encoding="utf-8" ) as tgt: with open(joined_file, "w", encoding="utf-8") as joined: for s, t in zip_longest(src, tgt): print("{} ||| {}".format(s.strip(), t.strip()), file=joined) bwd_align_file = os.path.join(args.output_dir, "align.backward") # run forward alignment fwd_align_file = os.path.join(args.output_dir, "align.forward") fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format( FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file ) assert os.system(fwd_fast_align_cmd) == 0 # run backward alignment bwd_align_file = os.path.join(args.output_dir, "align.backward") bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format( FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file ) assert os.system(bwd_fast_align_cmd) == 0 # run symmetrization sym_out_file = os.path.join(args.output_dir, "aligned") sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format( SYMFASTALIGN=sym_fast_align_bin, FWD=fwd_align_file, BWD=bwd_align_file, SRC=args.source_file, TGT=args.target_file, OUT=sym_out_file, HEURISTIC=args.sym_heuristic, SYMAL=symal_bin, ) assert os.system(sym_cmd) == 0 if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/build_sym_alignment.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import argparse import sentencepiece as spm def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model", required=True, help="sentencepiece model to use for decoding" ) parser.add_argument("--input", required=True, help="input file to decode") parser.add_argument("--input_format", choices=["piece", "id"], default="piece") args = parser.parse_args() sp = spm.SentencePieceProcessor() sp.Load(args.model) if args.input_format == "piece": def decode(l): return "".join(sp.DecodePieces(l)) elif args.input_format == "id": def decode(l): return "".join(sp.DecodeIds(l)) else: raise NotImplementedError def tok2int(tok): # remap reference-side <unk> (represented as <<unk>>) to 0 return int(tok) if tok != "<<unk>>" else 0 with open(args.input, "r", encoding="utf-8") as h: for line in h: if args.input_format == "id": print(decode(list(map(tok2int, line.rstrip().split())))) elif args.input_format == "piece": print(decode(line.rstrip().split())) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/spm_decode.py
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/__init__.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import re import shutil import sys pt_regexp = re.compile(r"checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt") pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt") pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt") def parse_checkpoints(files): entries = [] for f in files: m = pt_regexp_epoch_based.fullmatch(f) if m is not None: entries.append((int(m.group(1)), m.group(0))) else: m = pt_regexp_update_based.fullmatch(f) if m is not None: entries.append((int(m.group(1)), m.group(0))) return entries def last_n_checkpoints(files, n): entries = parse_checkpoints(files) return [x[1] for x in sorted(entries, reverse=True)[:n]] def every_n_checkpoints(files, n): entries = parse_checkpoints(files) return [x[1] for x in sorted(sorted(entries)[::-n])] def main(): parser = argparse.ArgumentParser( description=( "Recursively delete checkpoint files from `root_dir`, " "but preserve checkpoint_best.pt and checkpoint_last.pt" ) ) parser.add_argument("root_dirs", nargs="*") parser.add_argument( "--save-last", type=int, default=0, help="number of last checkpoints to save" ) parser.add_argument( "--save-every", type=int, default=0, help="interval of checkpoints to save" ) parser.add_argument( "--preserve-test", action="store_true", help="preserve checkpoints in dirs that start with test_ prefix (default: delete them)", ) parser.add_argument( "--delete-best", action="store_true", help="delete checkpoint_best.pt" ) parser.add_argument( "--delete-last", action="store_true", help="delete checkpoint_last.pt" ) parser.add_argument( "--no-dereference", action="store_true", help="don't dereference symlinks" ) args = parser.parse_args() files_to_desymlink = [] files_to_preserve = [] files_to_delete = [] for root_dir in args.root_dirs: for root, _subdirs, files in os.walk(root_dir): if args.save_last > 0: to_save = last_n_checkpoints(files, args.save_last) else: to_save = [] if args.save_every > 0: to_save += every_n_checkpoints(files, args.save_every) for file in files: if not pt_regexp.fullmatch(file): continue full_path = os.path.join(root, file) if ( not os.path.basename(root).startswith("test_") or args.preserve_test ) and ( (file == "checkpoint_last.pt" and not args.delete_last) or (file == "checkpoint_best.pt" and not args.delete_best) or file in to_save ): if os.path.islink(full_path) and not args.no_dereference: files_to_desymlink.append(full_path) else: files_to_preserve.append(full_path) else: files_to_delete.append(full_path) if len(files_to_desymlink) == 0 and len(files_to_delete) == 0: print("Nothing to do.") sys.exit(0) files_to_desymlink = sorted(files_to_desymlink) files_to_preserve = sorted(files_to_preserve) files_to_delete = sorted(files_to_delete) print("Operations to perform (in order):") if len(files_to_desymlink) > 0: for file in files_to_desymlink: print(" - preserve (and dereference symlink): " + file) if len(files_to_preserve) > 0: for file in files_to_preserve: print(" - preserve: " + file) if len(files_to_delete) > 0: for file in files_to_delete: print(" - delete: " + file) while True: resp = input("Continue? 
(Y/N): ") if resp.strip().lower() == "y": break elif resp.strip().lower() == "n": sys.exit(0) print("Executing...") if len(files_to_desymlink) > 0: for file in files_to_desymlink: realpath = os.path.realpath(file) print("rm " + file) os.remove(file) print("cp {} {}".format(realpath, file)) shutil.copyfile(realpath, file) if len(files_to_delete) > 0: for file in files_to_delete: print("rm " + file) os.remove(file) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/rm_pt.py
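A small, self-contained demonstration (invented file names) of how the checkpoint-name regexes above decide which files get a numeric sort key for the --save-last / --save-every selection.

import re

pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt")
pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt")

for name in ["checkpoint3.pt", "checkpoint_12_6000.pt", "checkpoint_best.pt"]:
    m = pt_regexp_epoch_based.fullmatch(name) or pt_regexp_update_based.fullmatch(name)
    if m is not None:
        print(name, "-> sort key", int(m.group(1)))   # 3 for the epoch file, 6000 for the update file
    else:
        print(name, "-> no numeric key; handled by the best/last rules instead")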
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Count the number of documents and average number of lines and tokens per document in a large file. Documents should be separated by a single empty line. """ import argparse import gzip import sys import numpy as np def main(): parser = argparse.ArgumentParser() parser.add_argument("input") parser.add_argument("--gzip", action="store_true") args = parser.parse_args() def gopen(): if args.gzip: return gzip.open(args.input, "r") else: return open(args.input, "r", encoding="utf-8") num_lines = [] num_toks = [] with gopen() as h: num_docs = 1 num_lines_in_doc = 0 num_toks_in_doc = 0 for i, line in enumerate(h): if len(line.strip()) == 0: # empty line indicates new document num_docs += 1 num_lines.append(num_lines_in_doc) num_toks.append(num_toks_in_doc) num_lines_in_doc = 0 num_toks_in_doc = 0 else: num_lines_in_doc += 1 num_toks_in_doc += len(line.rstrip().split()) if i % 1000000 == 0: print(i, file=sys.stderr, end="", flush=True) elif i % 100000 == 0: print(".", file=sys.stderr, end="", flush=True) print(file=sys.stderr, flush=True) print("found {} docs".format(num_docs)) print("average num lines per doc: {}".format(np.mean(num_lines))) print("average num toks per doc: {}".format(np.mean(num_toks))) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/count_docs.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import argparse import contextlib import sys import sentencepiece as spm def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model", required=True, help="sentencepiece model to use for encoding" ) parser.add_argument( "--inputs", nargs="+", default=["-"], help="input files to filter/encode" ) parser.add_argument( "--outputs", nargs="+", default=["-"], help="path to save encoded outputs" ) parser.add_argument("--output_format", choices=["piece", "id"], default="piece") parser.add_argument( "--min-len", type=int, metavar="N", help="filter sentence pairs with fewer than N tokens", ) parser.add_argument( "--max-len", type=int, metavar="N", help="filter sentence pairs with more than N tokens", ) args = parser.parse_args() assert len(args.inputs) == len( args.outputs ), "number of input and output paths should match" sp = spm.SentencePieceProcessor() sp.Load(args.model) if args.output_format == "piece": def encode(l): return sp.EncodeAsPieces(l) elif args.output_format == "id": def encode(l): return list(map(str, sp.EncodeAsIds(l))) else: raise NotImplementedError if args.min_len is not None or args.max_len is not None: def valid(line): return (args.min_len is None or len(line) >= args.min_len) and ( args.max_len is None or len(line) <= args.max_len ) else: def valid(lines): return True with contextlib.ExitStack() as stack: inputs = [ stack.enter_context(open(input, "r", encoding="utf-8")) if input != "-" else sys.stdin for input in args.inputs ] outputs = [ stack.enter_context(open(output, "w", encoding="utf-8")) if output != "-" else sys.stdout for output in args.outputs ] stats = { "num_empty": 0, "num_filtered": 0, } def encode_line(line): line = line.strip() if len(line) > 0: line = encode(line) if valid(line): return line else: stats["num_filtered"] += 1 else: stats["num_empty"] += 1 return None for i, lines in enumerate(zip(*inputs), start=1): enc_lines = list(map(encode_line, lines)) if not any(enc_line is None for enc_line in enc_lines): for enc_line, output_h in zip(enc_lines, outputs): print(" ".join(enc_line), file=output_h) if i % 10000 == 0: print("processed {} lines".format(i), file=sys.stderr) print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr) print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/spm_encode.py
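A hedged round-trip sketch tying spm_encode.py and spm_decode.py above together; "spm.model" is a placeholder for a trained SentencePiece model, so the exact pieces printed depend on that model.

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spm.model")                       # hypothetical path to a trained model

pieces = sp.EncodeAsPieces("hello world")  # what --output_format=piece writes, joined by spaces
ids = sp.EncodeAsIds("hello world")        # what --output_format=id writes
print(pieces, ids)

print(sp.DecodePieces(pieces))             # spm_decode.py with --input_format=piece
print(sp.DecodeIds(ids))                   # spm_decode.py with --input_format=id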
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Split a large file into shards while respecting document boundaries. Documents should be separated by a single empty line. """ import argparse import contextlib def main(): parser = argparse.ArgumentParser() parser.add_argument("input") parser.add_argument("--num-shards", type=int) args = parser.parse_args() assert args.num_shards is not None and args.num_shards > 1 with open(args.input, "r", encoding="utf-8") as h: with contextlib.ExitStack() as stack: outputs = [ stack.enter_context( open(args.input + ".shard" + str(i), "w", encoding="utf-8") ) for i in range(args.num_shards) ] doc = [] first_doc = [True] * args.num_shards def output_doc(i): if not first_doc[i]: outputs[i].write("\n") first_doc[i] = False for line in doc: outputs[i].write(line) doc.clear() num_docs = 0 for line in h: if line.strip() == "": # empty line indicates new document output_doc(num_docs % args.num_shards) num_docs += 1 else: doc.append(line) output_doc(num_docs % args.num_shards) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/shard_docs.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import sys import sentencepiece as spm if __name__ == "__main__": spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/spm_train.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import collections import os import re import torch from fairseq.file_io import PathManager def average_checkpoints(inputs): """Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() params_keys = None new_state = None num_models = len(inputs) for fpath in inputs: with PathManager.open(fpath, "rb") as f: state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, "cpu") ), ) # Copies over the settings from the first checkpoint if new_state is None: new_state = state model_params = state["model"] model_params_keys = list(model_params.keys()) if params_keys is None: params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( "For checkpoint {}, expected list of params: {}, " "but found: {}".format(f, params_keys, model_params_keys) ) for k in params_keys: p = model_params[k] if isinstance(p, torch.HalfTensor): p = p.float() if k not in params_dict: params_dict[k] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: params_dict[k] += p averaged_params = collections.OrderedDict() for k, v in params_dict.items(): averaged_params[k] = v if averaged_params[k].is_floating_point(): averaged_params[k].div_(num_models) else: averaged_params[k] //= num_models new_state["model"] = averaged_params return new_state def last_n_checkpoints(paths, n, update_based, upper_bound=None): assert len(paths) == 1 path = paths[0] if update_based: pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt") else: pt_regexp = re.compile(r"checkpoint(\d+)\.pt") files = PathManager.ls(path) entries = [] for f in files: m = pt_regexp.fullmatch(f) if m is not None: sort_key = int(m.group(1)) if upper_bound is None or sort_key <= upper_bound: entries.append((sort_key, m.group(0))) if len(entries) < n: raise Exception( "Found {} checkpoint files but need at least {}", len(entries), n ) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] def main(): parser = argparse.ArgumentParser( description="Tool to average the params of input checkpoints to " "produce a new checkpoint", ) # fmt: off parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.') parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.') num_group = parser.add_mutually_exclusive_group() num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, ' 'and average last this many of them.') num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, ' 'and average last this many of them.') parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, ' 'when using --num-update-checkpoints, this will set an upper bound on which 
update to use' 'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.' 'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500' ) # fmt: on args = parser.parse_args() print(args) num = None is_update_based = False if args.num_update_checkpoints is not None: num = args.num_update_checkpoints is_update_based = True elif args.num_epoch_checkpoints is not None: num = args.num_epoch_checkpoints assert args.checkpoint_upper_bound is None or ( args.num_epoch_checkpoints is not None or args.num_update_checkpoints is not None ), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints" assert ( args.num_epoch_checkpoints is None or args.num_update_checkpoints is None ), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints" if num is not None: args.inputs = last_n_checkpoints( args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound, ) print("averaging checkpoints: ", args.inputs) new_state = average_checkpoints(args.inputs) with PathManager.open(args.output, "wb") as f: torch.save(new_state, f) print("Finished writing averaged checkpoint to {}".format(args.output)) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/average_checkpoints.py
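A toy illustration (fabricated two-checkpoint state dicts) of the averaging rule in average_checkpoints above: parameters are summed across checkpoints and divided by the number of models, with integer tensors floor-divided instead.

import collections
import torch

checkpoints = [
    {"w": torch.tensor([1.0, 2.0]), "num_updates": torch.tensor(10)},
    {"w": torch.tensor([3.0, 4.0]), "num_updates": torch.tensor(20)},
]

summed = collections.OrderedDict()
for state in checkpoints:
    for k, p in state.items():
        summed[k] = summed[k] + p if k in summed else p.clone()

averaged = collections.OrderedDict()
for k, v in summed.items():
    averaged[k] = v / len(checkpoints) if v.is_floating_point() else v // len(checkpoints)

print(averaged["w"])            # tensor([2., 3.])
print(averaged["num_updates"])  # tensor(15)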
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from fairseq.data import Dictionary, data_utils, indexed_dataset def get_parser(): parser = argparse.ArgumentParser( description="writes text from binarized file to stdout" ) # fmt: off parser.add_argument('--dataset-impl', help='dataset implementation', choices=indexed_dataset.get_available_dataset_impl()) parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None) parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read') # fmt: on return parser def main(): parser = get_parser() args = parser.parse_args() dictionary = Dictionary.load(args.dict) if args.dict is not None else None dataset = data_utils.load_indexed_dataset( args.input, dictionary, dataset_impl=args.dataset_impl, default="lazy", ) for tensor_line in dataset: if dictionary is None: line = " ".join([str(int(x)) for x in tensor_line]) else: line = dictionary.string(tensor_line) print(line) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/read_binarized.py
#!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys """Reads in a fairseq output file, and verifies that the constraints (C- lines) are present in the output (the first H- line). Assumes that constraints are listed prior to the first hypothesis. """ constraints = [] found = 0 total = 0 for line in sys.stdin: if line.startswith("C-"): constraints.append(line.rstrip().split("\t")[1]) elif line.startswith("H-"): text = line.split("\t")[2] for constraint in constraints: total += 1 if constraint in text: found += 1 else: print(f"No {constraint} in {text}", file=sys.stderr) constraints = [] print(f"Found {found} / {total} = {100 * found / total:.1f}%")
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/constraints/validate.py
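A self-contained rehearsal of the check validate.py above performs, with fabricated fairseq-generate-style lines (C- lines carry constraints; the following H- line is searched for them).

lines = [
    "C-0\tgreen",
    "H-0\t-0.21\tthe green house",
    "C-1\tblue",
    "H-1\t-0.35\tthe red house",
]

constraints, found, total = [], 0, 0
for line in lines:
    if line.startswith("C-"):
        constraints.append(line.rstrip().split("\t")[1])
    elif line.startswith("H-"):
        text = line.split("\t")[2]
        for constraint in constraints:
            total += 1
            found += constraint in text
        constraints = []

print(f"Found {found} / {total} = {100 * found / total:.1f}%")   # Found 1 / 2 = 50.0%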
#!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Extracts random constraints from reference files.""" import argparse import random import sys from sacrebleu import extract_ngrams def get_phrase(words, index, length): assert index < len(words) - length + 1 phr = " ".join(words[index : index + length]) for i in range(index, index + length): words.pop(index) return phr def main(args): if args.seed: random.seed(args.seed) for line in sys.stdin: constraints = [] def add_constraint(constraint): constraints.append(constraint) source = line.rstrip() if "\t" in line: source, target = line.split("\t") if args.add_sos: target = f"<s> {target}" if args.add_eos: target = f"{target} </s>" if len(target.split()) >= args.len: words = [target] num = args.number choices = {} for i in range(num): if len(words) == 0: break segmentno = random.choice(range(len(words))) segment = words.pop(segmentno) tokens = segment.split() phrase_index = random.choice(range(len(tokens))) choice = " ".join( tokens[phrase_index : min(len(tokens), phrase_index + args.len)] ) for j in range( phrase_index, min(len(tokens), phrase_index + args.len) ): tokens.pop(phrase_index) if phrase_index > 0: words.append(" ".join(tokens[0:phrase_index])) if phrase_index + 1 < len(tokens): words.append(" ".join(tokens[phrase_index:])) choices[target.find(choice)] = choice # mask out with spaces target = target.replace(choice, " " * len(choice), 1) for key in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep="\t") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases") parser.add_argument("--len", "-l", type=int, default=1, help="phrase length") parser.add_argument( "--add-sos", default=False, action="store_true", help="add <s> token" ) parser.add_argument( "--add-eos", default=False, action="store_true", help="add </s> token" ) parser.add_argument("--seed", "-s", default=0, type=int) args = parser.parse_args() main(args)
EXA-1-master
exa/models/unilm-master/decoding/GAD/scripts/constraints/extract.py
import os import sys import time import torch import logging import argparse import copy from tqdm import tqdm from torch import Tensor from omegaconf import open_dict from typing import Dict, Optional from fairseq import utils from fairseq.checkpoint_utils import load_model_ensemble_and_task logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("inference") def write_result(results, output_file): with open(output_file, 'w') as f: for line in results: f.write(line + '\n') @torch.no_grad() def forward_decoder(model, input_tokens, encoder_out, temperature=1.0, incremental_state=None, parallel_forward_start_pos=None, use_log_softmax=False): decoder_out = model.decoder.forward(input_tokens, encoder_out=encoder_out, incremental_state=incremental_state, parallel_forward_start_pos=parallel_forward_start_pos) decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1]) if use_log_softmax: probs = model.get_normalized_probs(decoder_out_tuple, log_probs=True, sample=None) else: probs = decoder_out_tuple[0] pred_tokens = torch.argmax(probs, dim=-1).squeeze(0) return pred_tokens @torch.no_grad() def fairseq_generate(data_lines, args, models, task, batch_size, beam_size, device): """beam search | greedy decoding implemented by fairseq""" src_dict = task.source_dictionary tgt_dict = task.target_dictionary gen_args = copy.copy(args) with open_dict(gen_args): gen_args.beam = beam_size generator = task.build_generator(models, gen_args) data_size = len(data_lines) all_results = [] logger.info(f'Fairseq generate batch {batch_size}, beam {beam_size}') start = time.perf_counter() for start_idx in tqdm(range(0, data_size, batch_size)): batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch_dataset.left_pad_source = True batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) translations = generator.generate(models, batch, prefix_tokens=None) results = [] for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos]) delta = time.perf_counter() - start remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta @torch.no_grad() def baseline_generate(data_lines, model, task, batch_size, device, no_use_logsoft=True, max_len=200): """batch Implementation""" src_dict = task.source_dictionary tgt_dict = task.target_dictionary data_size = len(data_lines) all_results = [] start = time.perf_counter() logger.info(f'Baseline generate') for start_idx in tqdm(range(0, data_size, batch_size)): batch_size = min(data_size - start_idx, batch_size) batch_lines = [line for line in data_lines[start_idx: start_idx + batch_size]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch_dataset.left_pad_source = True 
batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) net_input = batch['net_input'] encoder_out = model.encoder.forward(net_input['src_tokens'], net_input['src_lengths']) incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})) batch_tokens = [[tgt_dict.eos()] for _ in range(batch_size)] finish_list = [] for step in range(0, max_len): cur_input_tokens = torch.tensor(batch_tokens).to(device).long() pred_tokens = forward_decoder(model, cur_input_tokens, encoder_out, incremental_state, use_log_softmax=not no_use_logsoft, ) for i, pred_tok in enumerate(pred_tokens): if len(batch_tokens[i]) == 1: batch_tokens[i].append(pred_tok.item()) else: if batch_tokens[i][-1] != tgt_dict.eos(): batch_tokens[i].append(pred_tok.item()) else: if i not in finish_list: finish_list.append(i) batch_tokens[i].append(tgt_dict.eos()) if len(finish_list) == batch_size: break batch_tokens = [y for x, y in sorted(zip(batch['id'].cpu().tolist(), batch_tokens))] for tokens in batch_tokens: all_results.append(tgt_dict.string(tokens[1:])) remove_bpe_results = [line.replace('@@ ', '') for line in all_results] delta = time.perf_counter() - start return remove_bpe_results, delta def construct_hash_sets(batch_sents, min_gram=1, max_gram=3): """batch Implementation""" batch_hash_dicts = [] for sent in batch_sents: hash_dict = {} for i in range(0, len(sent) - min_gram + 1): for j in range(min_gram, max_gram + 1): if i + j <= len(sent): ngram = tuple(sent[i: i + j]) if ngram not in hash_dict: hash_dict[ngram] = [] hash_dict[ngram].append(i + j) batch_hash_dicts.append(hash_dict) return batch_hash_dicts def find_hash_sets(hash_set, tokens, min_gram=1, max_gram=3): for i in range(min_gram, max_gram + 1): if len(tokens) < i: return -1 ngram = tuple(tokens[-i:]) if ngram not in hash_set: return -1 if len(hash_set[ngram]) == 1: return hash_set[ngram][0] return -1 def cut_incremental_state(incremental_state, keep_len, encoder_state_ids): for n in incremental_state: if n[: n.index('.')] in encoder_state_ids: continue for k in incremental_state[n]: if incremental_state[n][k] is not None: if incremental_state[n][k].dim() == 4: incremental_state[n][k] = incremental_state[n][k][:, :, :keep_len] elif incremental_state[n][k].dim() == 2: incremental_state[n][k] = incremental_state[n][k][:, :keep_len] @torch.no_grad() def aggressive_generate(data_lines, model, task, batch_size, device, max_len=200): """batch Implementation""" src_dict = task.source_dictionary tgt_dict = task.target_dictionary data_size = len(data_lines) all_results = [] start_time = time.perf_counter() for start_idx in tqdm(range(0, data_size, batch_size)): batch_results = [[tgt_dict.eos()] for _ in range(batch_size)] batch_size = min(data_size - start_idx, batch_size) batch_lines = [line for line in data_lines[start_idx: start_idx + batch_size]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch_dataset.left_pad_source = False batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) net_input = batch['net_input'] encoder_out = model.encoder.forward(net_input['src_tokens'], net_input['src_lengths']) src_tokens = net_input['src_tokens'].tolist() batch_tokens = [[tgt_dict.eos()] for _ in range(batch_size)] 
line_id = batch['id'].cpu().tolist() # remove padding, for hash construct batch_src_lines = [batch_ids[line_id[i]].cpu().tolist() for i in range(0, batch_size)] src_hash_lists = construct_hash_sets(batch_src_lines) finish_list = [] at_list = [] # pred token position start_list = [0] * batch_size # src token position src_pos_list = [0] * batch_size for step in range(0, max_len): # Aggressive Decoding at the first step if step == 0: cur_span_input_tokens = torch.tensor([[tgt_dict.eos()] + t for t in src_tokens]).to(device).long() else: # padding, 2 * max_len for boundary conditions pad_tokens = [([tgt_dict.eos()] + [tgt_dict.pad()] * max_len * 2) for _ in range(batch_size)] for i in range(batch_size): index = max_len if max_len < len(batch_tokens[i]) else len(batch_tokens[i]) pad_tokens[i][:index] = batch_tokens[i][:index] cur_span_input_tokens = torch.tensor(pad_tokens).to(device) cur_span_input_tokens = cur_span_input_tokens[:, : cur_span_input_tokens.ne(tgt_dict.pad()).sum(1).max()] input_tokens_add = [t[1:] + [-1] for t in cur_span_input_tokens.cpu().tolist()] pred_tensor = forward_decoder(model, cur_span_input_tokens, encoder_out) pred_tokens = pred_tensor.cpu().tolist() if batch_size == 1: pred_tokens = [pred_tokens] for i, (input_token_add, pred_token) in enumerate(zip(input_tokens_add, pred_tokens)): if i not in finish_list: # wrong pos is based on the src sent wrong_pos = len(batch_src_lines[i][src_pos_list[i]:]) for j, (inp, pred) in enumerate(zip(input_token_add[start_list[i]:], pred_token[start_list[i]:])): if inp != pred: wrong_pos = j break if step == 0: src_pos_list[i] += wrong_pos batch_tokens[i].extend(pred_token[start_list[i]: start_list[i] + wrong_pos]) if (batch_tokens[i][-1] == tgt_dict.eos() and len(batch_tokens[i]) != 1 and wrong_pos >= len(batch_src_lines[i][src_pos_list[i]:])) or start_list[i] > max_len: finish_list.append(i) if len(batch_tokens[i]) > max_len + 1: batch_tokens[i] = batch_tokens[i][:max_len + 1] batch_results[i] = batch_tokens[i] else: if i not in at_list: # greedy decoding batch_tokens[i] = batch_tokens[i][: start_list[i] + wrong_pos + 1] batch_tokens[i].append(pred_token[start_list[i] + wrong_pos]) start_list[i] = start_list[i] + wrong_pos + 1 at_list.append(i) else: batch_tokens[i].append(pred_token[start_list[i]]) start_list[i] += 1 find_end_idx = find_hash_sets(src_hash_lists[i], batch_tokens[i]) if find_end_idx != -1: start_list[i] = len(batch_tokens[i]) - 1 src_pos_list[i] = find_end_idx batch_tokens[i] = batch_tokens[i] + batch_src_lines[i][src_pos_list[i]:] at_list.remove(i) if len(finish_list) == batch_size: break batch_results = [y for x, y in sorted(zip(line_id, batch_results))] for tokens in batch_results: all_results.append(tgt_dict.string(tokens[1:])) delta = time.perf_counter() - start_time remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint-path', type=str, default=None, help='path to model file, e.g., /to/path/checkpoint_best.pt') parser.add_argument('--bin-data', type=str, default=None, help='directory containing src and tgt dictionaries') parser.add_argument('--input-path', type=str, default=None, help='path to eval file, e.g., /to/path/conll14.bpe.txt') parser.add_argument('--output-path', type=str, default=None, help='path to output file, e.g., /to/path/conll14.pred.txt') parser.add_argument('--batch', type=int, default=10, help='batch size') parser.add_argument('--beam', type=int, 
default=1, help='beam size') parser.add_argument('--fairseq', action='store_true', default=False, help='fairseq decoding') parser.add_argument('--baseline', action='store_true', default=False, help='greedy/batch decoding') parser.add_argument('--aggressive', action='store_true', default=False, help='aggressive decoding') parser.add_argument('--block', type=int, default=None) parser.add_argument('--match', type=int, default=1) parser.add_argument('--cpu', action='store_true', default=False) parser.add_argument('--fp16', action='store_true', default=False) cmd_args = parser.parse_args() cmd_args.checkpoint_path = os.path.expanduser(cmd_args.checkpoint_path) cmd_args.bin_data = os.path.expanduser(cmd_args.bin_data) cmd_args.input_path = os.path.expanduser(cmd_args.input_path) cmd_args.output_path = os.path.expanduser(cmd_args.output_path) models, args, task = load_model_ensemble_and_task(filenames=[cmd_args.checkpoint_path], arg_overrides={'data': cmd_args.bin_data}) device = torch.device('cuda') model = models[0].to(device).eval() if cmd_args.fp16: logging.info("fp16 enabled!") model.half() with open(cmd_args.input_path, 'r') as f: bpe_sents = [l.strip() for l in f.readlines()] remove_bpe_results = None if cmd_args.fairseq: remove_bpe_results, delta = fairseq_generate(bpe_sents, args, models, task, cmd_args.batch, cmd_args.beam, device) logger.info(f'Fairseq generate batch {cmd_args.batch}, beam {cmd_args.beam}: {delta}') elif cmd_args.baseline: remove_bpe_results, delta = baseline_generate(bpe_sents, model, task, cmd_args.batch, device) logger.info(f'Baseline generate: {delta}') elif cmd_args.aggressive: remove_bpe_results, delta = aggressive_generate(bpe_sents, model, task, cmd_args.batch, device) logger.info(f'Aggressive generate: {delta}') if cmd_args.output_path is not None: write_result(remove_bpe_results, cmd_args.output_path)
EXA-1-master
exa/models/unilm-master/decoding/IAD/inference_batch.py
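A condensed, single-sentence illustration (toy token ids, renamed helpers) of the n-gram hashing used by construct_hash_sets/find_hash_sets above: the current decoder suffix is looked up in the source's 1- to 3-gram table, and a unique hit tells aggressive decoding where to resume copying the source.

def build_ngram_index(src, min_gram=1, max_gram=3):
    index = {}
    for i in range(len(src)):
        for n in range(min_gram, max_gram + 1):
            if i + n <= len(src):
                index.setdefault(tuple(src[i:i + n]), []).append(i + n)  # store end positions
    return index

def lookup(index, tokens, min_gram=1, max_gram=3):
    for n in range(min_gram, max_gram + 1):
        if len(tokens) < n:
            return -1
        ends = index.get(tuple(tokens[-n:]))
        if ends is None:
            return -1
        if len(ends) == 1:
            return ends[0]          # unique match: copy the source from this position onward
    return -1                       # still ambiguous after max_gram -> keep decoding token by token

src = [5, 8, 9, 8, 4]               # toy source token ids
index = build_ngram_index(src)
print(lookup(index, [7, 8, 9]))     # -> 3: the suffix (9,) occurs once, ending at position 3
print(lookup(index, [7, 8]))        # -> -1: the suffix (8,) is ambiguous and (7, 8) never occurs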
import logging, os, sys import time import torch from torch import Tensor from typing import Dict, List, Optional import copy from tqdm import tqdm from omegaconf import open_dict import fairseq from fairseq.checkpoint_utils import load_model_ensemble_and_task from fairseq import utils from fairseq.data import data_utils import argparse logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("inference") def write_result(results, output_file): with open(output_file, 'w') as f: for line in results: f.write(line + '\n') @torch.no_grad() def fairseq_generate(data_lines, args, models, task, batch_size, beam_size, device): # beam search | greedy decoding implemented by fairseq src_dict = task.source_dictionary tgt_dict = task.target_dictionary gen_args = copy.copy(args) with open_dict(gen_args): gen_args.beam = beam_size generator = task.build_generator(models, gen_args) data_size = len(data_lines) all_results = [] logger.info(f'Fairseq generate batch {batch_size}, beam {beam_size}') start = time.perf_counter() for start_idx in tqdm(range(0, data_size, batch_size)): batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch_dataset.left_pad_source = True batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) translations = generator.generate(models, batch, prefix_tokens=None) results = [] for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos]) delta = time.perf_counter() - start remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta @torch.no_grad() def forward_decoder(model, input_tokens, encoder_out, incremental_state, parallel_forward_start_pos=None, temperature=1.0, use_log_softmax=True): decoder_out = model.decoder.forward(input_tokens, encoder_out=encoder_out, incremental_state=incremental_state, parallel_forward_start_pos=parallel_forward_start_pos) decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1]) if use_log_softmax: # 1, len, vocab probs = model.get_normalized_probs(decoder_out_tuple, log_probs=True, sample=None) else: probs = decoder_out_tuple[0] # len pred_tokens = torch.argmax(probs, dim=-1).squeeze(0) return pred_tokens @torch.no_grad() def baseline_generate(data_lines, model, task, device, no_use_logsoft=False, max_len=200): # simplified greedy decoding src_dict = task.source_dictionary tgt_dict = task.target_dictionary data_size = len(data_lines) all_results = [] start = time.perf_counter() logger.info(f'Baseline generate') for start_idx in tqdm(range(0, data_size)): bpe_line = data_lines[start_idx] src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long() net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device), 'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)} encoder_out = model.encoder.forward_torchscript(net_input) incremental_state = torch.jit.annotate(Dict[str, Dict[str, 
Optional[Tensor]]], torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})) tokens = [tgt_dict.eos()] for step in range(0, max_len): cur_input_tokens = torch.tensor([tokens]).to(device).long() # scalar pred_token = forward_decoder(model, cur_input_tokens, encoder_out, incremental_state, use_log_softmax=not no_use_logsoft).item() if pred_token == tgt_dict.eos(): break else: tokens.append(pred_token) all_results.append(tgt_dict.string(tokens[1:])) delta = time.perf_counter() - start remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta def construct_hash_sets(sent, min_gram=1, max_gram=3): hash_dict = {} for i in range(0, len(sent) - min_gram + 1): for j in range(min_gram, max_gram+1): if i + j <= len(sent): ngram = tuple(sent[i: i+j]) if ngram not in hash_dict: hash_dict[ngram] = [] hash_dict[ngram].append(i+j) return hash_dict def find_hash_sets(hash_set, tokens, min_gram=1, max_gram=3): for i in range(min_gram, max_gram+1): if len(tokens) < i: return -1 ngram = tuple(tokens[-i:]) if ngram not in hash_set: return -1 if len(hash_set[ngram]) == 1: return hash_set[ngram][0] return -1 def cut_incremental_state(incremental_state, keep_len, encoder_state_ids): for n in incremental_state: if n[: n.index('.')] in encoder_state_ids: continue for k in incremental_state[n]: if incremental_state[n][k] is not None: if incremental_state[n][k].dim() == 4: incremental_state[n][k] = incremental_state[n][k][:, :, :keep_len] elif incremental_state[n][k].dim() == 2: incremental_state[n][k] = incremental_state[n][k][:, :keep_len] @torch.no_grad() def aggressive_generate(data_lines, model, task, device, no_use_logsoft=False, max_len=200): src_dict = task.source_dictionary tgt_dict = task.target_dictionary encoder_state_ids = [] for i in range(len(model.decoder.layers)): encoder_state_ids.append(model.decoder.layers[i].encoder_attn._incremental_state_id) data_size = len(data_lines) all_results = [] start_time = time.perf_counter() for start_idx in tqdm(range(0, data_size)): bpe_line = data_lines[start_idx] src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long() net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device), 'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)} encoder_out = model.encoder.forward_torchscript(net_input) incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})) src_tokens_remove_eos_list = src_tokens[:-1].tolist() src_hash = construct_hash_sets(src_tokens_remove_eos_list) start = 0 tokens = [tgt_dict.eos()] while start < len(src_tokens_remove_eos_list) and len(tokens) < max_len + 1: cur_span_input_tokens = torch.tensor([tokens + src_tokens_remove_eos_list[start:]]).to(device).long() pred_tokens = forward_decoder(model, cur_span_input_tokens, encoder_out, incremental_state, parallel_forward_start_pos=len(tokens) - 1, use_log_softmax=not no_use_logsoft) pred_judge = pred_tokens.cpu() == src_tokens[start:] if all(pred_judge): tokens += src_tokens[start:].tolist() break else: wrong_pos = pred_judge.tolist().index(False) start += wrong_pos tokens.extend(pred_tokens.cpu().tolist()[: wrong_pos + 1]) cut_incremental_state(incremental_state, keep_len=len(tokens) - 1, encoder_state_ids=encoder_state_ids) cur_len = len(tokens) for step in range(cur_len, max_len + 1): cur_input_tokens = torch.tensor([tokens]).to(device).long() pred_token = forward_decoder(model, cur_input_tokens, encoder_out, incremental_state, 
use_log_softmax=not no_use_logsoft).item() if pred_token == tgt_dict.eos(): start = len(src_tokens_remove_eos_list) break else: tokens.append(pred_token) find_end_idx = find_hash_sets(src_hash, tokens) if find_end_idx != -1: start = find_end_idx if start < len(src_tokens_remove_eos_list): break if len(tokens) > max_len + 1: tokens = tokens[:max_len + 1] all_results.append(tgt_dict.string(tokens[1:])) delta = time.perf_counter() - start_time remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta @torch.no_grad() def paper_aggressive_generate(data_lines, model, task, device, no_use_logsoft=False, max_len=200): src_dict = task.source_dictionary tgt_dict = task.target_dictionary encoder_state_ids = [] for i in range(len(model.decoder.layers)): encoder_state_ids.append(model.decoder.layers[i].encoder_attn._incremental_state_id) data_size = len(data_lines) all_results = [] start_time = time.perf_counter() for start_idx in tqdm(range(0, data_size)): bpe_line = data_lines[start_idx] src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long() net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device), 'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)} encoder_out = model.encoder.forward_torchscript(net_input) incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})) src_tokens_remove_eos_list = src_tokens[:-1].tolist() src_hash = construct_hash_sets(src_tokens_remove_eos_list) src_tokens_add_pad_list = torch.tensor(src_tokens_remove_eos_list + [-1]) # [..., -1] tokens = [tgt_dict.eos()] while (len(tokens) == 1 or tokens[-1] != tgt_dict.eos()) and len(tokens) < max_len + 1: if len(tokens) == 1: find_end_idx = 0 else: find_end_idx = find_hash_sets(src_hash, tokens) if find_end_idx != -1 and find_end_idx < len(src_tokens_remove_eos_list): cur_span_input_tokens = torch.tensor([tokens + src_tokens_remove_eos_list[find_end_idx:]]).to(device).long() pred_tokens = forward_decoder(model, cur_span_input_tokens, encoder_out, incremental_state, parallel_forward_start_pos=len(tokens) - 1, use_log_softmax=not no_use_logsoft) pred_judge = pred_tokens.cpu() == src_tokens_add_pad_list[find_end_idx:] wrong_pos = pred_judge.tolist().index(False) tokens.extend(pred_tokens.cpu().tolist()[: wrong_pos + 1]) cut_incremental_state(incremental_state, keep_len=len(tokens) - 1, encoder_state_ids=encoder_state_ids) else: cur_input_tokens = torch.tensor([tokens]).to(device).long() pred_token = forward_decoder(model, cur_input_tokens, encoder_out, incremental_state, use_log_softmax=not no_use_logsoft).item() tokens.append(pred_token) if len(tokens) > max_len + 1: tokens = tokens[:max_len + 1] if tokens[-1] == tgt_dict.eos(): tokens = tokens[:-1] all_results.append(tgt_dict.string(tokens[1:])) delta = time.perf_counter() - start_time remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint-path', type=str, required=True, help='path to model file, e.g., /to/path/checkpoint_best.pt') parser.add_argument('--bin-data', type=str, required=True, help='directory containing src and tgt dictionaries') parser.add_argument('--input-path', type=str, required=True, help='path to eval file, e.g., /to/path/conll14.bpe.txt') parser.add_argument('--output-path', type=str, default=None, help='path to output file, e.g., 
/to/path/conll14.pred.txt') parser.add_argument('--batch', type=int, default=None, help='batch size') parser.add_argument('--beam', type=int, default=5, help='beam size') parser.add_argument('--baseline', action='store_true', default=False, help='greedy/one-by-one decoding') parser.add_argument('--aggressive', action='store_true', default=False, help='aggressive decoding') parser.add_argument('--no_use_logsoft', action='store_true', default=False, help='not use log_softmax when aggressive decoding') parser.add_argument('--block', type=int, default=None) parser.add_argument('--match', type=int, default=1) parser.add_argument('--cpu', action='store_true', default=False) cmd_args = parser.parse_args() cmd_args.checkpoint_path = os.path.expanduser(cmd_args.checkpoint_path) cmd_args.bin_data = os.path.expanduser(cmd_args.bin_data) cmd_args.input_path = os.path.expanduser(cmd_args.input_path) cmd_args.output_path = os.path.expanduser(cmd_args.output_path) models, args, task = load_model_ensemble_and_task(filenames=[cmd_args.checkpoint_path], arg_overrides={'data': cmd_args.bin_data}) if cmd_args.cpu: device = torch.device('cpu') else: device = torch.device('cuda') model = models[0].to(device).eval() with open(cmd_args.input_path, 'r') as f: bpe_sents = [l.strip() for l in f.readlines()] if cmd_args.batch is not None: remove_bpe_results, delta = fairseq_generate(bpe_sents, args, models, task, cmd_args.batch, cmd_args.beam, device) logger.info(f'Fairseq generate batch {cmd_args.batch}, beam {cmd_args.beam}: {delta}') elif cmd_args.baseline: remove_bpe_results, delta = baseline_generate(bpe_sents, model, task, device, no_use_logsoft=cmd_args.no_use_logsoft) logger.info(f'Baseline generate: {delta}') elif cmd_args.aggressive: remove_bpe_results, delta = paper_aggressive_generate(bpe_sents, model, task, device, no_use_logsoft=cmd_args.no_use_logsoft) # remove_bpe_results, delta = aggressive_generate(bpe_sents, model, task, device, no_use_logsoft=cmd_args.no_use_logsoft) logger.info(f'Aggressive generate: {delta}') if cmd_args.output_path is not None: write_result(remove_bpe_results, cmd_args.output_path)
EXA-1-master
exa/models/unilm-master/decoding/IAD/inference.py
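A toy sketch (fabricated token ids) of the verification step inside aggressive_generate above: tokens drafted from the source are fed in parallel, the model's predictions are compared against that draft, and only the prefix up to the first mismatch plus the model's correcting token is accepted.

import torch

draft = torch.tensor([11, 12, 13, 14])   # speculative continuation copied from the source
preds = torch.tensor([11, 12, 99, 14])   # what the decoder actually predicts at each position

judge = (preds == draft).tolist()
if all(judge):
    accepted = preds.tolist()                     # the whole draft is verified in one pass
else:
    wrong_pos = judge.index(False)
    accepted = preds.tolist()[: wrong_pos + 1]    # keep verified tokens + the correcting one

print(accepted)   # -> [11, 12, 99]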
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import subprocess
import sys

from setuptools import Extension, find_packages, setup

if sys.version_info < (3, 6):
    sys.exit("Sorry, Python >= 3.6 is required for fairseq.")


def write_version_py():
    with open(os.path.join("fairseq", "version.txt")) as f:
        version = f.read().strip()

    # append latest commit hash to version string
    try:
        sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"])
            .decode("ascii")
            .strip()
        )
        version += "+" + sha[:7]
    except Exception:
        pass

    # write version info to fairseq/version.py
    with open(os.path.join("fairseq", "version.py"), "w") as f:
        f.write('__version__ = "{}"\n'.format(version))
    return version


version = write_version_py()


with open("README.md") as f:
    readme = f.read()


if sys.platform == "darwin":
    extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
    extra_compile_args = ["-std=c++11", "-O3"]


class NumpyExtension(Extension):
    """Source: https://stackoverflow.com/a/54128391"""

    def __init__(self, *args, **kwargs):
        self.__include_dirs = []
        super().__init__(*args, **kwargs)

    @property
    def include_dirs(self):
        import numpy

        return self.__include_dirs + [numpy.get_include()]

    @include_dirs.setter
    def include_dirs(self, dirs):
        self.__include_dirs = dirs


extensions = [
    Extension(
        "fairseq.libbleu",
        sources=[
            "fairseq/clib/libbleu/libbleu.cpp",
            "fairseq/clib/libbleu/module.cpp",
        ],
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.data_utils_fast",
        sources=["fairseq/data/data_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.token_block_utils_fast",
        sources=["fairseq/data/token_block_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
]


cmdclass = {}


try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension

    extensions.extend(
        [
            cpp_extension.CppExtension(
                "fairseq.libnat",
                sources=[
                    "fairseq/clib/libnat/edit_dist.cpp",
                ],
            )
        ]
    )

    if "CUDA_HOME" in os.environ:
        extensions.extend(
            [
                cpp_extension.CppExtension(
                    "fairseq.libnat_cuda",
                    sources=[
                        "fairseq/clib/libnat_cuda/edit_dist.cu",
                        "fairseq/clib/libnat_cuda/binding.cpp",
                    ],
                ),
                cpp_extension.CppExtension(
                    "fairseq.ngram_repeat_block_cuda",
                    sources=[
                        "fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
                        "fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
                    ],
                ),
            ]
        )
    cmdclass["build_ext"] = cpp_extension.BuildExtension

except ImportError:
    pass


if "READTHEDOCS" in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if "build_ext" in cmdclass:
        del cmdclass["build_ext"]

    # use CPU build of PyTorch
    dependency_links = [
        "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
    ]
else:
    dependency_links = []


if "clean" in sys.argv[1:]:
    # Source: https://bit.ly/2NLVsgE
    print("deleting Cython files...")
    import subprocess

    subprocess.run(
        ["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
        shell=True,
    )


extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
    extra_packages.append("fairseq.model_parallel.megatron.mpu")


def do_setup(package_data):
    setup(
        name="fairseq",
        version=version,
        description="Facebook AI Research Sequence-to-Sequence Toolkit",
        url="https://github.com/pytorch/fairseq",
        classifiers=[
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
        ],
        long_description=readme,
        long_description_content_type="text/markdown",
        setup_requires=[
            "cython",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "setuptools>=18.0",
        ],
        install_requires=[
            "cffi",
            "cython",
            'dataclasses; python_version<"3.7"',
            "hydra-core<1.1",
            "omegaconf<2.1",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "regex",
            "sacrebleu>=1.4.12",
            "torch",
            "tqdm",
        ],
        dependency_links=dependency_links,
        packages=find_packages(
            exclude=[
                "examples",
                "examples.*",
                "scripts",
                "scripts.*",
                "tests",
                "tests.*",
            ]
        )
        + extra_packages,
        package_data=package_data,
        ext_modules=extensions,
        test_suite="tests",
        entry_points={
            "console_scripts": [
                "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
                "fairseq-generate = fairseq_cli.generate:cli_main",
                "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
                "fairseq-interactive = fairseq_cli.interactive:cli_main",
                "fairseq-preprocess = fairseq_cli.preprocess:cli_main",
                "fairseq-score = fairseq_cli.score:cli_main",
                "fairseq-train = fairseq_cli.train:cli_main",
                "fairseq-validate = fairseq_cli.validate:cli_main",
            ],
        },
        cmdclass=cmdclass,
        zip_safe=False,
    )


def get_files(path, relative_to="fairseq"):
    all_files = []
    for root, _dirs, files in os.walk(path, followlinks=True):
        root = os.path.relpath(root, relative_to)
        for file in files:
            if file.endswith(".pyc"):
                continue
            all_files.append(os.path.join(root, file))
    return all_files


try:
    # symlink examples into fairseq package so package_data accepts them
    fairseq_examples = os.path.join("fairseq", "examples")
    if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
        os.symlink(os.path.join("..", "examples"), fairseq_examples)

    package_data = {
        "fairseq": (
            get_files(fairseq_examples)
            + get_files(os.path.join("fairseq", "config"))
        )
    }
    do_setup(package_data)
finally:
    if "build_ext" not in sys.argv[1:] and os.path.exists(fairseq_examples):
        os.unlink(fairseq_examples)
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/setup.py
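Note: the NumpyExtension class in setup.py defers the numpy import until the build backend actually reads include_dirs, so that setup_requires has a chance to install numpy first. A minimal standalone sketch of the same pattern follows; the class name and the toy module path are illustrative, not part of fairseq.

from setuptools import Extension


class DeferredNumpyExtension(Extension):
    """Only import numpy when include_dirs is read, i.e. at build time."""

    def __init__(self, *args, **kwargs):
        self._extra_include_dirs = []  # filled by the setter below
        super().__init__(*args, **kwargs)

    @property
    def include_dirs(self):
        import numpy  # deferred: numpy may not be installed before setup_requires runs

        return self._extra_include_dirs + [numpy.get_include()]

    @include_dirs.setter
    def include_dirs(self, dirs):
        self._extra_include_dirs = dirs


# usage sketch: behaves like a normal Extension once numpy is available at build time
ext = DeferredNumpyExtension("toy.fast_ops", sources=["toy/fast_ops.pyx"], language="c++")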
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""

from fairseq_cli.train import cli_main


if __name__ == "__main__":
    cli_main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/train.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import functools import importlib dependencies = [ "dataclasses", "hydra", "numpy", "omegaconf", "regex", "requests", "torch", ] # Check for required dependencies and raise a RuntimeError if any are missing. missing_deps = [] for dep in dependencies: try: importlib.import_module(dep) except ImportError: # Hack: the hydra package is provided under the "hydra-core" name in # pypi. We don't want the user mistakenly calling `pip install hydra` # since that will install an unrelated package. if dep == "hydra": dep = "hydra-core" missing_deps.append(dep) if len(missing_deps) > 0: raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps))) # only do fairseq imports after checking for dependencies from fairseq.hub_utils import ( # noqa; noqa BPEHubInterface as bpe, TokenizerHubInterface as tokenizer, ) from fairseq.models import MODEL_REGISTRY # noqa # torch.hub doesn't build Cython components, so if they are not found then try # to build them here try: import fairseq.data.token_block_utils_fast # noqa except ImportError: try: import cython # noqa import os from setuptools import sandbox sandbox.run_setup( os.path.join(os.path.dirname(__file__), "setup.py"), ["build_ext", "--inplace"], ) except ImportError: print( "Unable to build Cython components. Please make sure Cython is " "installed if the torch.hub model you are loading depends on it." ) # automatically expose models defined in FairseqModel::hub_models for _model_type, _cls in MODEL_REGISTRY.items(): for model_name in _cls.hub_models().keys(): globals()[model_name] = functools.partial( _cls.from_pretrained, model_name, )
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/hubconf.py
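Note: hubconf.py exposes every name returned by each registered model's hub_models() as a torch.hub entry point. A hedged usage sketch follows; the repo id and model name are taken from the public fairseq documentation and are assumptions here, and the call needs network access plus the optional moses/fastbpe tokenizer dependencies.

import torch

# Assumption: 'pytorch/fairseq' is reachable and ships this hub model name.
en2de = torch.hub.load(
    "pytorch/fairseq",
    "transformer.wmt19.en-de.single_model",
    tokenizer="moses",
    bpe="fastbpe",
)
en2de.eval()
print(en2de.translate("Hello world!"))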
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import tempfile import unittest import torch from fairseq.data import Dictionary class TestDictionary(unittest.TestCase): def test_finalize(self): txt = [ "A B C D", "B C D", "C D", "D", ] ref_ids1 = list( map( torch.IntTensor, [ [4, 5, 6, 7, 2], [5, 6, 7, 2], [6, 7, 2], [7, 2], ], ) ) ref_ids2 = list( map( torch.IntTensor, [ [7, 6, 5, 4, 2], [6, 5, 4, 2], [5, 4, 2], [4, 2], ], ) ) # build dictionary d = Dictionary() for line in txt: d.encode_line(line, add_if_not_exist=True) def get_ids(dictionary): ids = [] for line in txt: ids.append(dictionary.encode_line(line, add_if_not_exist=False)) return ids def assertMatch(ids, ref_ids): for toks, ref_toks in zip(ids, ref_ids): self.assertEqual(toks.size(), ref_toks.size()) self.assertEqual(0, (toks != ref_toks).sum().item()) ids = get_ids(d) assertMatch(ids, ref_ids1) # check finalized dictionary d.finalize() finalized_ids = get_ids(d) assertMatch(finalized_ids, ref_ids2) # write to disk and reload with tempfile.NamedTemporaryFile(mode="w") as tmp_dict: d.save(tmp_dict.name) d = Dictionary.load(tmp_dict.name) reload_ids = get_ids(d) assertMatch(reload_ids, ref_ids2) assertMatch(finalized_ids, reload_ids) def test_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999 #fairseq:overwrite\n" "<s> 999 #fairseq:overwrite\n" "</s> 999 #fairseq:overwrite\n" ", 999\n" "▁de 999\n" ) d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index("<pad>"), 1) self.assertEqual(d.index("foo"), 3) self.assertEqual(d.index("<unk>"), 4) self.assertEqual(d.index("<s>"), 5) self.assertEqual(d.index("</s>"), 6) self.assertEqual(d.index(","), 7) self.assertEqual(d.index("▁de"), 8) def test_no_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n" ) d = Dictionary() with self.assertRaisesRegex(RuntimeError, "Duplicate"): d.add_from_file(dict_file) def test_space(self): # for example, character models treat space as a symbol dict_file = io.StringIO(" 999\n" "a 999\n" "b 999\n") d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index(" "), 4) self.assertEqual(d.index("a"), 5) self.assertEqual(d.index("b"), 6) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_dictionary.py
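Note: the indices asserted in the test follow from Dictionary reserving the four special symbols (<s>, <pad>, </s>, <unk>) as ids 0-3 before any user symbols, as the assertions above confirm. A small sketch of the API the test exercises; the example sentence is made up.

import torch
from fairseq.data import Dictionary

d = Dictionary()                                   # specials occupy ids 0-3
d.encode_line("hello world", add_if_not_exist=True)
assert d.index("hello") == 4 and d.index("world") == 5
ids = d.encode_line("hello world", add_if_not_exist=False)
assert torch.equal(ids, torch.IntTensor([4, 5, 2]))  # </s> (id 2) appended by default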
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import random import unittest from multiprocessing import Manager import torch import torch.nn as nn from fairseq import distributed_utils, optim from omegaconf import OmegaConf class Model(nn.Module): def __init__(self, input_size, output_size): super(Model, self).__init__() self.fc = nn.Linear(input_size, output_size) def forward(self, input): output = self.fc(input) return output def setup_model_loss_criterion(cfg, args, rank, is_cuda): """ setup model, criterion and optimizer based on input args """ args.distributed_rank = rank cfg.distributed_training.distributed_rank = args.distributed_rank if cfg.distributed_training.distributed_world_size > 1: distributed_utils.distributed_init(cfg) torch.manual_seed(1) model = Model(args.input_size, args.nb_classes) loss_fn = nn.CrossEntropyLoss() if is_cuda: model = model.cuda() loss_fn = loss_fn.cuda() optimizer = optim.sgd.SGD(args, model.parameters()) optimizer = optim.FairseqBMUF( cfg=cfg.bmuf, optimizer=optimizer ) return model, loss_fn, optimizer def train_step(input, target, model, loss_fn, optimizer, **unused): """Do forward, backward and parameter update.""" model.train() output = model(input) loss = loss_fn(output, target) optimizer.backward(loss) optimizer.step() def single_gpu_training(cfg, args, rank, iterations, shared_results): is_cuda = torch.cuda.is_available() if is_cuda: torch.cuda.set_device(rank) model, loss_fn, optimizer = setup_model_loss_criterion(cfg, args, rank, is_cuda) for _ in range(iterations): input = torch.randn(1, args.input_size) target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes) if is_cuda: input = input.cuda() target = target.cuda() train_step(input, target, model, loss_fn, optimizer) results = [] for param in model.parameters(): if len(results) == 0: results = param.flatten().cpu().data else: results = torch.cat((results, param.flatten().cpu().data), 0) shared_results[rank] = results def setup_args(): args = argparse.Namespace() args.global_sync_iter = 20 args.block_momentum = 0.875 args.block_lr = 0.5 args.input_size = 5 args.nb_classes = 2 args.batch_size = 1 args.lr = [1e-3] args.momentum = 0 args.weight_decay = 0 args.warmup_iterations = 0 args.use_nbm = True args.average_sync = True args.global_sync_iter = 1 args.model_parallel_size = 1 args.distributed_backend = "gloo" args.distributed_world_size = 2 port = random.randint(10000, 20000) args.distributed_init_method = "tcp://localhost:{port}".format(port=port) args.distributed_init_host = "localhost" args.distributed_port = port + 1 args.local_world_size = args.distributed_world_size cfg = OmegaConf.create() cfg.optimization = OmegaConf.create() cfg.common = OmegaConf.create() cfg.distributed_training = OmegaConf.create() cfg.dataset = OmegaConf.create() cfg.bmuf = OmegaConf.create() cfg.optimizer = OmegaConf.create() cfg.bmuf.global_sync_iter = args.global_sync_iter cfg.bmuf.block_momentum = args.block_momentum cfg.bmuf.block_lr = args.block_lr cfg.dataset.batch_size = args.batch_size cfg.optimization.lr = args.lr cfg.optimizer.momentum = args.momentum cfg.optimizer.weight_decay = args.weight_decay cfg.bmuf.warmup_iterations = args.warmup_iterations cfg.bmuf.use_nbm = args.use_nbm cfg.bmuf.average_sync = args.average_sync cfg.common.model_parallel_size = args.model_parallel_size cfg.distributed_training.distributed_backend = 
args.distributed_backend cfg.distributed_training.distributed_world_size = args.distributed_world_size cfg.bmuf.distributed_world_size = args.distributed_world_size cfg.distributed_training.distributed_init_method = args.distributed_init_method cfg.distributed_training.distributed_port = args.distributed_port return cfg, args @unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs") class TestBMUF(unittest.TestCase): def bmuf_process(self, cfg, args, iterations): processes = [] results = Manager().dict() ctx = torch.multiprocessing.get_context("spawn") for rank in range(args.distributed_world_size): p = ctx.Process( target=single_gpu_training, args=(cfg, args, rank, iterations, results) ) p.start() processes.append(p) for p in processes: p.join() return results def test_bmuf_sync(self): # Train model for 1 iteration and do bmuf sync without doing warmup cfg, args = setup_args() iterations = 1 results = self.bmuf_process(cfg, args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_warmup_sync(self): # Train model for 20 iteration and do warmup sync without doing bmuf sync cfg, args = setup_args() args.warmup_iterations = 20 cfg.bmuf.warmup_iterations = args.warmup_iterations iterations = 20 results = self.bmuf_process(cfg, args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_warmup_sync_bmuf_sync(self): # Train model for 25 iteration and do warmup sync after 20 iteration # and bmuf sync after 25 iteration cfg, args = setup_args() args.warmup_iterations = 20 args.global_sync_iter = 5 cfg.bmuf.warmup_iterations = args.warmup_iterations cfg.bmuf.global_sync_iter = args.global_sync_iter iterations = 25 results = self.bmuf_process(cfg, args, iterations) # Make sure params in both machines are same assert len(results) == 2 self.assertAlmostEqual(results[0], results[1]) def test_single_gpu_bmuf(self): # Train model for 5 iterations and use GPU 1 cfg, args = setup_args() args.distributed_world_size = 1 args.warmup_iterations = 5 cfg.distributed_training.distributed_world_size = args.distributed_world_size cfg.bmuf.distributed_world_size = args.distributed_world_size cfg.bmuf.warmup_iterations = args.warmup_iterations iterations = 20 results = self.bmuf_process(cfg, args, iterations) assert len(results) == 1 def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_bmuf.py
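Note: the test assembles its config field by field; OmegaConf also accepts a nested dict up front, which is often easier to read. A minimal sketch with illustrative values only (these are not the defaults fairseq ships).

from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "bmuf": {"global_sync_iter": 1, "block_momentum": 0.875, "block_lr": 0.5},
        "distributed_training": {"distributed_world_size": 2, "distributed_backend": "gloo"},
        "optimization": {"lr": [1e-3]},
    }
)
assert cfg.bmuf.block_lr == 0.5
print(OmegaConf.to_yaml(cfg))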
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq import utils class TestUtils(unittest.TestCase): def test_convert_padding_direction(self): pad = 1 left_pad = torch.LongTensor( [ [2, 3, 4, 5, 6], [1, 7, 8, 9, 10], [1, 1, 1, 11, 12], ] ) right_pad = torch.LongTensor( [ [2, 3, 4, 5, 6], [7, 8, 9, 10, 1], [11, 12, 1, 1, 1], ] ) self.assertAlmostEqual( right_pad, utils.convert_padding_direction( left_pad, pad, left_to_right=True, ), ) self.assertAlmostEqual( left_pad, utils.convert_padding_direction( right_pad, pad, right_to_left=True, ), ) def test_make_positions(self): pad = 1 left_pad_input = torch.LongTensor( [ [9, 9, 9, 9, 9], [1, 9, 9, 9, 9], [1, 1, 1, 9, 9], ] ) left_pad_output = torch.LongTensor( [ [2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [1, 1, 1, 2, 3], ] ) right_pad_input = torch.LongTensor( [ [9, 9, 9, 9, 9], [9, 9, 9, 9, 1], [9, 9, 1, 1, 1], ] ) right_pad_output = torch.LongTensor( [ [2, 3, 4, 5, 6], [2, 3, 4, 5, 1], [2, 3, 1, 1, 1], ] ) self.assertAlmostEqual( left_pad_output, utils.make_positions(left_pad_input, pad), ) self.assertAlmostEqual( right_pad_output, utils.make_positions(right_pad_input, pad), ) def test_clip_grad_norm_(self): params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False) grad_norm = utils.clip_grad_norm_(params, 1.0) self.assertTrue(torch.is_tensor(grad_norm)) self.assertEqual(grad_norm, 0.0) params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)] for p in params: p.grad = torch.full((5,), fill_value=2.0) grad_norm = utils.clip_grad_norm_(params, 1.0) exp_grad_norm = torch.full((15,), fill_value=2.0).norm() self.assertTrue(torch.is_tensor(grad_norm)) self.assertEqual(grad_norm, exp_grad_norm) grad_norm = utils.clip_grad_norm_(params, 1.0) self.assertAlmostEqual(grad_norm, torch.tensor(1.0)) def test_resolve_max_positions_with_tuple(self): resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000) self.assertEqual(resolved, (2000, 100, 2000)) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_utils.py
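Note: as the last test shows, fairseq.utils.clip_grad_norm_ returns the pre-clipping gradient norm as a tensor and rescales the gradients in place. A toy sketch of how that reads outside the test; the fake gradients are illustrative.

import torch
from fairseq import utils

params = [torch.nn.Parameter(torch.zeros(5)) for _ in range(3)]
for p in params:
    p.grad = torch.full((5,), 2.0)               # pretend backward() already ran

total_norm = utils.clip_grad_norm_(params, 1.0)  # norm before clipping
print(float(total_norm))
print(float(utils.clip_grad_norm_(params, 1.0)))  # ~1.0 now: grads were rescaled in place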
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from fairseq.data import iterators class TestIterators(unittest.TestCase): def test_counting_iterator(self, ref=None, itr=None): if ref is None: assert itr is None ref = list(range(10)) itr = iterators.CountingIterator(ref) else: assert len(ref) == 10 assert itr is not None self.assertTrue(itr.has_next()) self.assertEqual(itr.n, 0) self.assertEqual(next(itr), ref[0]) self.assertEqual(itr.n, 1) self.assertEqual(next(itr), ref[1]) self.assertEqual(itr.n, 2) itr.skip(3) self.assertEqual(itr.n, 5) self.assertEqual(next(itr), ref[5]) itr.skip(3) self.assertEqual(itr.n, 9) self.assertEqual(next(itr), ref[9]) self.assertFalse(itr.has_next()) def test_grouped_iterator(self): # test correctness x = list(range(10)) itr = iterators.GroupedIterator(x, 1) self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]) itr = iterators.GroupedIterator(x, 4) self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]) itr = iterators.GroupedIterator(x, 5) self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) # test CountingIterator functionality x = list(range(30)) ref = list(iterators.GroupedIterator(x, 3)) itr = iterators.GroupedIterator(x, 3) self.test_counting_iterator(ref, itr) def test_sharded_iterator(self): # test correctness x = list(range(10)) itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0) self.assertEqual(list(itr), x) itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0) self.assertEqual(list(itr), [0, 2, 4, 6, 8]) itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1) self.assertEqual(list(itr), [1, 3, 5, 7, 9]) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) self.assertEqual(list(itr), [0, 3, 6, 9]) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1) self.assertEqual(list(itr), [1, 4, 7, None]) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2) self.assertEqual(list(itr), [2, 5, 8, None]) # test CountingIterator functionality x = list(range(30)) ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0)) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) self.test_counting_iterator(ref, itr) def test_counting_iterator_take(self): ref = list(range(10)) itr = iterators.CountingIterator(ref) itr.take(5) self.assertEqual(len(itr), len(list(iter(itr)))) self.assertEqual(len(itr), 5) itr = iterators.CountingIterator(ref) itr.take(5) self.assertEqual(next(itr), ref[0]) self.assertEqual(next(itr), ref[1]) itr.skip(2) self.assertEqual(next(itr), ref[4]) self.assertFalse(itr.has_next()) def test_counting_iterator_buffered_iterator_take(self): ref = list(range(10)) buffered_itr = iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr) itr.take(5) self.assertEqual(len(itr), len(list(iter(itr)))) self.assertEqual(len(itr), 5) buffered_itr = iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr) itr.take(5) self.assertEqual(len(buffered_itr), 5) self.assertEqual(len(list(iter(buffered_itr))), 5) buffered_itr = iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr) itr.take(5) self.assertEqual(next(itr), ref[0]) self.assertEqual(next(itr), ref[1]) itr.skip(2) self.assertEqual(next(itr), ref[4]) self.assertFalse(itr.has_next()) self.assertRaises(StopIteration, next, buffered_itr) ref = list(range(4, 10)) buffered_itr = 
iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr, start=4) itr.take(5) self.assertEqual(len(itr), 5) self.assertEqual(len(buffered_itr), 1) self.assertEqual(next(itr), ref[0]) self.assertFalse(itr.has_next()) self.assertRaises(StopIteration, next, buffered_itr) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_iterators.py
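Note: a short sketch of the two iterator wrappers exercised above; the chunk size and skip count are arbitrary illustrative values.

from fairseq.data import iterators

grouped = iterators.GroupedIterator(list(range(10)), 4)  # chunks of up to 4 items
print(list(grouped))                 # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]

counting = iterators.CountingIterator(list(range(10)))
counting.skip(3)                     # fast-forward; counting.n tracks items consumed
print(next(counting), counting.n)    # 3 4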
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import Dict, List import tests.utils as test_utils import torch from fairseq import utils from fairseq.data import ( Dictionary, LanguagePairDataset, TransformEosDataset, data_utils, noising, ) class TestDataNoising(unittest.TestCase): def _get_test_data_with_bpe_cont_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with continuation markers as suffixes to denote non-end of word tokens. This is the standard BPE format used in fairseq's preprocessing. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he@@") vocab.add_symbol("llo") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("y@@") vocab.add_symbol("ou") vocab.add_symbol("n@@") vocab.add_symbol("ew") vocab.add_symbol("or@@") vocab.add_symbol("k") src_tokens = [ ["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"], ["how", "are", "y@@", "ou"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_bpe_end_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with end-of-word markers as suffixes to denote tokens at the end of a word. This is an alternative to fairseq's standard preprocessing framework and is not generally supported within fairseq. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he") vocab.add_symbol("llo_EOW") vocab.add_symbol("how_EOW") vocab.add_symbol("are_EOW") vocab.add_symbol("y") vocab.add_symbol("ou_EOW") vocab.add_symbol("n") vocab.add_symbol("ew_EOW") vocab.add_symbol("or") vocab.add_symbol("k_EOW") src_tokens = [ ["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"], ["how_EOW", "are_EOW", "y", "ou_EOW"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_word_vocab(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: word vocab x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. 
""" vocab = Dictionary() vocab.add_symbol("hello") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("you") vocab.add_symbol("new") vocab.add_symbol("york") src_tokens = [ ["hello", "new", "york", "you"], ["how", "are", "you", "new", "york"], ] x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _convert_src_tokens_to_tensor( self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool ): src_len = [len(x) for x in src_tokens] # If we have to append EOS, we include EOS in counting src length if append_eos: src_len = [length + 1 for length in src_len] x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad()) for i in range(len(src_tokens)): for j in range(len(src_tokens[i])): x[i][j] = vocab.index(src_tokens[i][j]) if append_eos: x[i][j + 1] = vocab.eos() x = x.transpose(1, 0) return x, torch.LongTensor(src_len) def assert_eos_at_end(self, x, x_len, eos): """Asserts last token of every sentence in x is EOS """ for i in range(len(x_len)): self.assertEqual( x[x_len[i] - 1][i], eos, ( "Expected eos (token id {eos}) at the end of sentence {i} " "but got {other} instead" ).format(i=i, eos=eos, other=x[i][-1]), ) def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised): # Expect only the first word (2 bpe tokens) of the first example # was dropped out self.assertEqual(x_len[0] - 2, l_noised[0]) for i in range(l_noised[0]): self.assertEqual(x_noised[i][0], x[i + 2][0]) def test_word_dropout_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk): # Expect only the first word (2 bpe tokens) of the first example # was blanked out self.assertEqual(x_len[0], l_noised[0]) for i in range(l_noised[0]): if i < 2: self.assertEqual(x_noised[i][0], unk) else: self.assertEqual(x_noised[i][0], x[i][0]) def test_word_blank_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def generate_unchanged_shuffle_map(self, length): return {i: i for i in range(length)} def assert_word_shuffle_matches_expected( self, x, x_len, max_shuffle_distance: int, vocab: Dictionary, expected_shufle_maps: List[Dict[int, int]], expect_eos_at_end: bool, bpe_end_marker=None, ): """ This verifies that with a given x, x_len, max_shuffle_distance, and vocab, we get the expected shuffle result. Args: x: Tensor of shape (T x B) = (sequence_length, batch_size) x_len: Tensor of length B = batch_size max_shuffle_distance: arg to pass to noising expected_shuffle_maps: List[mapping] where mapping is a Dict[old_index, new_index], mapping x's elements from their old positions in x to their new positions in x. expect_eos_at_end: if True, check the output to make sure there is an EOS at the end. bpe_end_marker: str denoting the BPE end token. 
If this is not None, we set the BPE cont token to None in the noising classes. """ bpe_cont_marker = None if bpe_end_marker is None: bpe_cont_marker = "@@" with data_utils.numpy_seed(1234): word_shuffle = noising.WordShuffle( vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker ) x_noised, l_noised = word_shuffle.noising( x, x_len, max_shuffle_distance=max_shuffle_distance ) # For every example, we have a different expected shuffle map. We check # that each example is shuffled as expected according to each # corresponding shuffle map. for i in range(len(expected_shufle_maps)): shuffle_map = expected_shufle_maps[i] for k, v in shuffle_map.items(): self.assertEqual(x[k][i], x_noised[v][i]) # Shuffling should not affect the length of each example for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised): self.assertEqual(pre_shuffle_length, post_shuffle_length) if expect_eos_at_end: self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_shuffle_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=True, ) def test_word_shuffle_with_eos_nonbpe(self): """The purpose of this is to test shuffling logic with word vocabs""" vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ {0: 0, 1: 1, 2: 3, 3: 2}, {0: 0, 1: 2, 2: 1, 3: 3, 4: 4}, ], expect_eos_at_end=True, ) def test_word_shuffle_without_eos(self): """Same result as word shuffle with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, ) def test_word_shuffle_without_eos_with_bpe_end_marker(self): """Same result as word shuffle without eos except using BPE end token""" vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False) # Assert word shuffle with max shuffle 
distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) def assert_no_eos_at_end(self, x, x_len, eos): """Asserts that the last token of each sentence in x is not EOS """ for i in range(len(x_len)): self.assertNotEqual( x[x_len[i] - 1][i], eos, "Expected no eos (token id {eos}) at the end of sentence {i}.".format( eos=eos, i=i ), ) def test_word_dropout_without_eos(self): """Same result as word dropout with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_blank_without_eos(self): """Same result as word blank with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def _get_noising_dataset_batch( self, src_tokens_no_pad, src_dict, append_eos_to_tgt=False, ): """ Constructs a NoisingDataset and the corresponding ``LanguagePairDataset(NoisingDataset(src), src)``. If *append_eos_to_tgt* is True, wrap the source dataset in :class:`TransformEosDataset` to append EOS to the clean source when using it as the target. 
""" src_dataset = test_utils.TestDataset(data=src_tokens_no_pad) noising_dataset = noising.NoisingDataset( src_dataset=src_dataset, src_dict=src_dict, seed=1234, max_word_shuffle_distance=3, word_dropout_prob=0.2, word_blanking_prob=0.2, noising_class=noising.UnsupervisedMTNoising, ) tgt = src_dataset language_pair_dataset = LanguagePairDataset( src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict ) language_pair_dataset = TransformEosDataset( language_pair_dataset, src_dict.eos(), append_eos_to_tgt=append_eos_to_tgt, ) dataloader = torch.utils.data.DataLoader( dataset=language_pair_dataset, batch_size=2, collate_fn=language_pair_dataset.collater, ) denoising_batch_result = next(iter(dataloader)) return denoising_batch_result def test_noising_dataset_with_eos(self): src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=True ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_noising_dataset_without_eos(self): """ Similar to test noising dataset with eos except that we have to set *append_eos_to_tgt* to ``True``. """ src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=False ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict, append_eos_to_tgt=True, ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_noising.py
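Note: a condensed sketch of the noising API the tests drive, on a single made-up sentence; the vocabulary, seed, and dropout probability are illustrative assumptions, and the layout mirrors the (seq_len x batch) tensors used above.

import torch
from fairseq.data import Dictionary, data_utils, noising

vocab = Dictionary()
for w in ["hello", "new", "york"]:
    vocab.add_symbol(w)

# one sentence of three whole words plus EOS, shaped (seq_len x batch)
sent = [vocab.index(w) for w in ["hello", "new", "york"]] + [vocab.eos()]
x = torch.LongTensor(sent).unsqueeze(1)
x_len = torch.LongTensor([len(sent)])

with data_utils.numpy_seed(1234):
    dropout = noising.WordDropout(vocab)
    x_noised, new_len = dropout.noising(x, x_len, 0.2)           # drop whole words
    x_blanked, _ = dropout.noising(x, x_len, 0.2, vocab.unk())   # blank words to <unk>
print(x_noised.squeeze(1).tolist(), int(new_len[0]))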
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.lstm import LSTMModel from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser class TestJitLSTMModel(unittest.TestCase): def _test_save_and_load(self, scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) def assertTensorEqual(self, t1, t2): t1 = t1[~torch.isnan(t1)] # can cause size mismatch errors if there are NaNs t2 = t2[~torch.isnan(t2)] self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) def test_jit_and_export_lstm(self): task, parser = get_dummy_task_and_parser() LSTMModel.add_args(parser) args = parser.parse_args([]) args.criterion = "" model = LSTMModel.build_model(args, task) scripted_model = torch.jit.script(model) self._test_save_and_load(scripted_model) def test_assert_jit_vs_nonjit_(self): task, parser = get_dummy_task_and_parser() LSTMModel.add_args(parser) args = parser.parse_args([]) args.criterion = "" model = LSTMModel.build_model(args, task) model.eval() scripted_model = torch.jit.script(model) scripted_model.eval() idx = len(task.source_dictionary) iter = 100 # Inject random input and check output seq_len_tensor = torch.randint(1, 10, (iter,)) num_samples_tensor = torch.randint(1, 10, (iter,)) for i in range(iter): seq_len = seq_len_tensor[i] num_samples = num_samples_tensor[i] src_token = (torch.randint(0, idx, (num_samples, seq_len)),) src_lengths = torch.randint(1, seq_len + 1, (num_samples,)) src_lengths, _ = torch.sort(src_lengths, descending=True) # Force the first sample to have seq_len src_lengths[0] = seq_len prev_output_token = (torch.randint(0, idx, (num_samples, 1)),) result = model(src_token[0], src_lengths, prev_output_token[0], None) scripted_result = scripted_model( src_token[0], src_lengths, prev_output_token[0], None ) self.assertTensorEqual(result[0], scripted_result[0]) self.assertTensorEqual(result[1], scripted_result[1]) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_lstm_jitable.py
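Note: the save/load round trip in _test_save_and_load is the generic TorchScript pattern; the same few lines work for any scriptable module. The toy module below stands in for the LSTM model and is not part of fairseq.

import tempfile
import torch


class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(4, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.fc(x)


scripted = torch.jit.script(Toy())
with tempfile.NamedTemporaryFile(suffix=".pt") as f:
    scripted.save(f.name)
    reloaded = torch.jit.load(f.name)
x = torch.randn(3, 4)
assert torch.allclose(scripted(x), reloaded(x))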
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention class TestSparseMultiheadAttention(unittest.TestCase): def test_sparse_multihead_attention(self): attn_weights = torch.randn(1, 8, 8) bidirectional_sparse_mask = torch.tensor( [ [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], ] ) bidirectional_attention = SparseMultiheadAttention( 16, 1, stride=4, expressivity=1, is_bidirectional=True ) bidirectional_attention_sparse_mask = ( bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8) ) torch.all( torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask) ) sparse_mask = torch.tensor( [ [ 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf")], [ float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, float("-inf"), float("-inf"), ], [ float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, float("-inf"), ], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], ] ) attention = SparseMultiheadAttention( 16, 1, stride=4, expressivity=1, is_bidirectional=False ) attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8) torch.all(torch.eq(attention_sparse_mask, sparse_mask)) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_sparse_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import uuid from fairseq import metrics class TestMetrics(unittest.TestCase): def test_nesting(self): with metrics.aggregate() as a: metrics.log_scalar("loss", 1) with metrics.aggregate() as b: metrics.log_scalar("loss", 2) self.assertEqual(a.get_smoothed_values()["loss"], 1.5) self.assertEqual(b.get_smoothed_values()["loss"], 2) def test_new_root(self): with metrics.aggregate() as a: metrics.log_scalar("loss", 1) with metrics.aggregate(new_root=True) as b: metrics.log_scalar("loss", 2) self.assertEqual(a.get_smoothed_values()["loss"], 1) self.assertEqual(b.get_smoothed_values()["loss"], 2) def test_nested_new_root(self): with metrics.aggregate() as layer1: metrics.log_scalar("loss", 1) with metrics.aggregate(new_root=True) as layer2: metrics.log_scalar("loss", 2) with metrics.aggregate() as layer3: metrics.log_scalar("loss", 3) with metrics.aggregate(new_root=True) as layer4: metrics.log_scalar("loss", 4) metrics.log_scalar("loss", 1.5) self.assertEqual(layer4.get_smoothed_values()["loss"], 4) self.assertEqual(layer3.get_smoothed_values()["loss"], 3) self.assertEqual(layer2.get_smoothed_values()["loss"], 2.5) self.assertEqual(layer1.get_smoothed_values()["loss"], 1.25) def test_named(self): name = str(uuid.uuid4()) metrics.reset_meters(name) with metrics.aggregate(name): metrics.log_scalar("loss", 1) metrics.log_scalar("loss", 3) with metrics.aggregate(name): metrics.log_scalar("loss", 2) self.assertEqual(metrics.get_smoothed_values(name)["loss"], 1.5) def test_nested_duplicate_names(self): name = str(uuid.uuid4()) metrics.reset_meters(name) with metrics.aggregate(name): metrics.log_scalar("loss", 1) with metrics.aggregate() as other: with metrics.aggregate(name): metrics.log_scalar("loss", 2) metrics.log_scalar("loss", 6) self.assertEqual(metrics.get_smoothed_values(name)["loss"], 3) self.assertEqual(other.get_smoothed_values()["loss"], 2) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_metrics.py
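Note: outside of tests, the same aggregation context is what training code uses to accumulate running averages. A minimal sketch; the loss values are illustrative.

from fairseq import metrics

with metrics.aggregate() as agg:
    for loss in (1.0, 3.0):
        metrics.log_scalar("loss", loss)   # smoothed as a running average
print(agg.get_smoothed_values()["loss"])   # 2.0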
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import unittest from io import StringIO from unittest.mock import MagicMock, patch import torch from fairseq import checkpoint_utils, data from omegaconf import OmegaConf def mock_trainer(epoch, num_updates, iterations_in_epoch): trainer = MagicMock() trainer.load_checkpoint.return_value = { "train_iterator": { "epoch": epoch, "iterations_in_epoch": iterations_in_epoch, "shuffle": False, }, } trainer.get_num_updates.return_value = num_updates return trainer def mock_dict(): d = MagicMock() d.pad.return_value = 1 d.eos.return_value = 2 d.unk.return_value = 3 return d def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch): tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1) tokens_ds = data.TokenBlockDataset( tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) trainer = mock_trainer(epoch, num_updates, iterations_in_epoch) dataset = data.LanguagePairDataset( tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False ) epoch_itr = data.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=[[i] for i in range(epoch_size)], ) return trainer, epoch_itr def get_mock_cfg(finetune_from_model): cfg_mock = OmegaConf.create( { "checkpoint": { "optimizer_overrides": "{}", "reset_dataloader": False, "reset_meters": False, "reset_optimizer": False, "reset_lr_scheduler": False, "finetune_from_model": finetune_from_model, "model_parallel_size": 1, }, "common": { "model_parallel_size": 1, }, } ) return cfg_mock class TestLoadCheckpoint(unittest.TestCase): def setUp(self): self.cfg_mock = get_mock_cfg(None) self.patches = { "os.makedirs": MagicMock(), "os.path.join": MagicMock(), "os.path.isfile": MagicMock(return_value=True), "os.path.isabs": MagicMock(return_value=False), "fairseq.file_io.PathManager.exists": MagicMock(return_value=False), } self.applied_patches = [patch(p, d) for p, d in self.patches.items()] [p.start() for p in self.applied_patches] logging.disable(logging.CRITICAL) def tearDown(self): patch.stopall() logging.disable(logging.NOTSET) def test_load_partial_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50) self.assertEqual(epoch_itr.iterations_in_epoch, 51) for _ in range(150 - 52): next(itr) self.assertEqual(epoch_itr.iterations_in_epoch, 149) self.assertTrue(itr.has_next()) next(itr) self.assertFalse(itr.has_next()) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertTrue(itr.has_next()) self.assertEqual(epoch_itr.epoch, 3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) def test_load_full_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) itr = 
epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0) def test_load_no_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) self.patches["os.path.isfile"].return_value = False _, epoch_itr = checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 1) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0) def test_finetune_from_model_args_conflict(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) for arg in [ "reset_optimizer", "reset_lr_scheduler", "reset_meters", "reset_dataloader", ]: with self.subTest(arg=arg): cfg_mock = get_mock_cfg("/temp/checkpoint_pretrained.pt") cfg_mock["checkpoint"][arg] = True with self.assertRaises(Exception) as context: _, _ = checkpoint_utils.load_checkpoint( cfg_mock.checkpoint, trainer ) self.assertTrue( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" in str(context.exception) ) def test_finetune_from_model(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" def mock_finetune_exist(path): if path == from_model_path: return True else: return False self.patches[ "fairseq.file_io.PathManager.exists" ].side_effect = mock_finetune_exist cfg_mock = get_mock_cfg(from_model_path) cfg_mock.checkpoint.restore_file = "checkpoint_last.pt" _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer) ( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, ) = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"] self.assertTrue(reset_optimizer) self.assertTrue(reset_lr_scheduler) self.assertTrue(reset_meters) def test_finetune_from_model_resume(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" # launch second time # both restore_file=checkpoint_last.pt and finetune_from_model are set def mock_finetune_exist(path): if path == from_model_path or path.endsWith("checkpoint_last.pt"): return True else: return False self.patches[ "fairseq.file_io.PathManager.exists" ].side_effect = mock_finetune_exist cfg_mock = get_mock_cfg(from_model_path) cfg_mock.checkpoint.restore_file = "checkpoint_last.pt" _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer) ( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, ) = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"] self.assertFalse(reset_optimizer) self.assertFalse(reset_lr_scheduler) self.assertFalse(reset_meters) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_train.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import unittest import tests.utils as test_utils import torch from fairseq.sequence_scorer import SequenceScorer class TestSequenceScorer(unittest.TestCase): def test_sequence_scorer(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) eos = d.eos() w1 = 4 w2 = 5 # construct dataloader data = [ { "source": torch.LongTensor([w1, w2, eos]), "target": torch.LongTensor([w1, w2, w1, eos]), }, { "source": torch.LongTensor([w2, eos]), "target": torch.LongTensor([w2, w1, eos]), }, { "source": torch.LongTensor([w2, eos]), "target": torch.LongTensor([w2, eos]), }, ] data_itr = test_utils.dummy_dataloader(data) # specify expected output probabilities args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 0.6, 0.4], # sentence 1 [0.0, unk, 0.4, 0.6], # sentence 2 [0.0, unk, 0.7, 0.3], # sentence 3 ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 0.2, 0.7], # sentence 1 [0.0, unk, 0.8, 0.2], # sentence 2 [0.7, unk, 0.1, 0.2], # sentence 3 ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 [0.10, unk, 0.50, 0.4], # sentence 1 [0.15, unk, 0.15, 0.7], # sentence 2 [0.00, unk, 0.00, 0.0], # sentence 3 ] ), # step 3: torch.FloatTensor( [ # eos w1 w2 [0.9, unk, 0.05, 0.05], # sentence 1 [0.0, unk, 0.00, 0.0], # sentence 2 [0.0, unk, 0.00, 0.0], # sentence 3 ] ), ] expected_scores = [ [0.6, 0.7, 0.5, 0.9], # sentence 1 [0.6, 0.8, 0.15], # sentence 2 [0.3, 0.7], # sentence 3 ] task = test_utils.TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) scorer = SequenceScorer(task.target_dictionary) for sample in data_itr: hypos = task.inference_step(scorer, [model], sample) for id, hypos_id in zip(sample["id"].tolist(), hypos): self.assertHypoTokens(hypos_id[0], data[id]["target"]) self.assertHypoScore(hypos_id[0], expected_scores[id]) def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import unittest from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models.transformer import TransformerModel from tests.test_sequence_generator import get_dummy_task_and_parser class TestInferenceDropout(unittest.TestCase): def setUp(self): self.task, self.parser = get_dummy_task_and_parser() TransformerModel.add_args(self.parser) self.args = self.parser.parse_args([]) self.args.encoder_layers = 2 self.args.decoder_layers = 1 logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_sets_inference_dropout_to_true(self): self.args.retain_dropout = True self.transformer_model = TransformerModel.build_model(self.args, self.task) cfg = convert_namespace_to_omegaconf(self.args) self.transformer_model.prepare_for_inference_(cfg) assert self.transformer_model.encoder.dropout_module.apply_during_inference assert self.transformer_model.decoder.dropout_module.apply_during_inference for layer in self.transformer_model.encoder.layers: assert layer.dropout_module.apply_during_inference def test_inference_dropout_false_by_default(self): self.transformer_model = TransformerModel.build_model(self.args, self.task) cfg = convert_namespace_to_omegaconf(self.args) self.transformer_model.prepare_for_inference_(cfg) assert not self.transformer_model.encoder.dropout_module.apply_during_inference assert not self.transformer_model.decoder.dropout_module.apply_during_inference for layer in self.transformer_model.encoder.layers: assert not layer.dropout_module.apply_during_inference for layer in self.transformer_model.decoder.layers: assert not layer.dropout_module.apply_during_inference def test_applies_training_mode(self): self.transformer_model = TransformerModel.build_model(self.args, self.task) assert self.transformer_model.encoder.dropout_module.training for layer in self.transformer_model.encoder.layers: assert layer.dropout_module.training self.transformer_model.eval() assert not self.transformer_model.decoder.dropout_module.training for layer in self.transformer_model.encoder.layers: assert not layer.dropout_module.training def test_retain_modules(self): self.args.retain_dropout = True self.args.retain_dropout_modules = [ "TransformerEncoder", "TransformerEncoderLayer", ] self.transformer_model = TransformerModel.build_model(self.args, self.task) cfg = convert_namespace_to_omegaconf(self.args) self.transformer_model.prepare_for_inference_(cfg) assert self.transformer_model.encoder.dropout_module.apply_during_inference assert not self.transformer_model.decoder.dropout_module.apply_during_inference for layer in self.transformer_model.decoder.layers: assert not layer.dropout_module.apply_during_inference
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_inference_dropout.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.modules.multihead_attention import MultiheadAttention class TestMultiheadAttention(unittest.TestCase): def test_append_prev_key_padding_mask(self): bsz = 1 src_len = 4 cases = [ # no padding mask (None, None, None), # current padding mask only ( torch.tensor([[1]]).bool(), None, torch.tensor([[0, 0, 0, 1]]).bool(), ), # previous padding mask only ( None, torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 0]]).bool(), ), # both padding masks ( torch.tensor([[1]]).bool(), torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 1]]).bool(), ), ] for c in cases: key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( c[0], c[1], batch_size=bsz, src_len=src_len, static_kv=False, ) if key_padding_mask is not None: self.assertTrue( torch.all(torch.eq(key_padding_mask, c[2])), f"Unexpected resultant key padding mask: {key_padding_mask}" f" given current: {c[0]} and previous: {c[1]}", ) self.assertEqual(key_padding_mask.size(0), bsz) self.assertEqual(key_padding_mask.size(1), src_len) else: self.assertIsNone(c[2]) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_multihead_attention.py
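The cases above pin down the contract that `_append_prev_key_padding_mask` must satisfy during incremental decoding: whichever of the previous or current mask is missing is treated as all-False filler, so the combined mask always spans `src_len` columns with the previous positions first. A minimal standalone sketch of that behavior (a hypothetical `append_key_padding_mask` helper that covers only the combinations exercised in the test, not fairseq's full implementation):

import torch

def append_key_padding_mask(curr_mask, prev_mask, batch_size, src_len):
    # Combine the mask carried in the incremental state with the mask of the
    # newly attended keys; missing pieces are filled with False (not padded).
    if prev_mask is None and curr_mask is None:
        return None
    prev_len = prev_mask.size(1) if prev_mask is not None else 0
    curr_len = curr_mask.size(1) if curr_mask is not None else 0
    if prev_mask is None:
        prev_mask = torch.zeros(batch_size, src_len - curr_len, dtype=torch.bool)
    if curr_mask is None:
        curr_mask = torch.zeros(batch_size, src_len - prev_len, dtype=torch.bool)
    return torch.cat([prev_mask, curr_mask], dim=1)

# e.g. append_key_padding_mask(torch.tensor([[True]]), None, 1, 4)
# -> tensor([[False, False, False, True]])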
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import numpy as np from fairseq.data.data_utils_fast import batch_by_size_fn from fairseq.data.data_utils_fast import batch_by_size_vec class TestBatchBySize(unittest.TestCase): @classmethod def batch_by_size_baseline( cls, indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ): """Simple, reliable and slow implementation of batch by size """ batches = [] start = 0 while start < len(indices): for end in range(start + 1, len(indices) + 1): max_val = max(num_tokens_vec[pos] for pos in range(start, end)) sent_count = end - start num_tokens = max_val * sent_count overflow = num_tokens > max_tokens > 0 or sent_count > max_sentences > 0 terminate = overflow or end == len(indices) if overflow: sent_count -= 1 if terminate: if sent_count > bsz_mult: sent_count = sent_count - sent_count % bsz_mult batches.append(indices[start : start + sent_count]) start = start + sent_count break return batches @classmethod def _get_error_message( cls, max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results ): return f"""Reference batch_by_size implementation should produce same output as the baseline method. Params: max_sentences={max_sentences}, max_tokens={max_tokens}, bsz_mult={bsz_mult}, num_tokens_vec={num_tokens_vec}, expected_batches={validation}, returned_batches={results}""" def _compare_results( self, indices_len, batch_by_size_impl, max_sentences, max_tokens, bsz_mult, num_tokens_vec, ): indices = np.array(list(range(indices_len))) validation = self.batch_by_size_baseline( indices, num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, bsz_mult=bsz_mult, ) results = batch_by_size_impl( indices, num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, bsz_mult=bsz_mult, ) error_msg = self._get_error_message( max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results ) self.assertEqual(len(validation), len(results), error_msg) for first, second in zip(validation, results): self.assertTrue(np.array_equal(first, second), error_msg) def _run_compare_with_baseline_sweep(self, batch_by_size_impl): """Compare reference batch_by_size implementation with batch_by_size_baseline across a dense grid of hyperparam values""" MAX_MAX_TOKENS = 10 NUM_TOKENS_VECS_COUNT = 5 for indices_len in [10, 11]: # try odd and even len of indices for max_sentences in range(0, indices_len + 2): for max_tokens in range(0, MAX_MAX_TOKENS): for bsz_mult in range(1, max(MAX_MAX_TOKENS, indices_len) + 2): for _ in range(NUM_TOKENS_VECS_COUNT): num_tokens_vec = np.random.randint( 0, max_tokens + 1, size=indices_len ) self._compare_results( indices_len, batch_by_size_impl, max_sentences, max_tokens, bsz_mult, num_tokens_vec, ) class TestBatchBySizeVec(TestBatchBySize): def test_compare_with_baseline(self): self._run_compare_with_baseline_sweep(batch_by_size_vec) class TestBatchBySizeFn(TestBatchBySize): def test_compare_with_baseline(self): def batch_by_size_fn_wrapper( indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ): def num_tokens_fn(idx): return num_tokens_vec[idx] return batch_by_size_fn( indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult ) self._run_compare_with_baseline_sweep(batch_by_size_fn_wrapper) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_data_utils.py
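The baseline above spells out the batching rule these tests check: a batch costs `max(num_tokens in batch) * batch_size`, a batch is closed just before it would exceed `max_tokens` or `max_sentences`, and a closed batch larger than `bsz_mult` is trimmed to a multiple of it. A small usage of that baseline with toy, illustrative lengths:

import numpy as np
from tests.test_data_utils import TestBatchBySize

indices = np.array(list(range(8)))
num_tokens_vec = [3, 4, 2, 5, 1, 6, 2, 3]
batches = TestBatchBySize.batch_by_size_baseline(
    indices, num_tokens_vec, max_tokens=8, max_sentences=4, bsz_mult=1
)
# The first batch is [0, 1]: its cost is max(3, 4) * 2 = 8 <= max_tokens,
# while adding index 2 would raise the cost to 4 * 3 = 12 and overflow.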
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import logging
import os
import tempfile
import unittest
from io import StringIO

from fairseq import checkpoint_utils
from tests.utils import (
    create_dummy_data,
    preprocess_translation_data,
    train_translation_model,
)


class TestCheckpointUtils(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    @contextlib.contextmanager
    def _train_transformer(self, seed, extra_args=None):
        if extra_args is None:
            extra_args = []
        with tempfile.TemporaryDirectory(f"_train_transformer_seed{seed}") as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(
                data_dir,
                "transformer_iwslt_de_en",
                [
                    "--encoder-layers",
                    "3",
                    "--decoder-layers",
                    "3",
                    "--encoder-embed-dim",
                    "8",
                    "--decoder-embed-dim",
                    "8",
                    "--seed",
                    str(seed),
                ]
                + extra_args,
            )
            yield os.path.join(data_dir, "checkpoint_last.pt")

    def test_load_model_ensemble_and_task(self):
        with contextlib.redirect_stdout(StringIO()):
            with self._train_transformer(seed=123) as model1:
                with self._train_transformer(seed=456) as model2:
                    ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
                        filenames=[model1, model2]
                    )
                    self.assertEqual(len(ensemble), 2)

                    # after Transformer has been migrated to Hydra, this will probably
                    # become cfg.common.seed
                    self.assertEqual(ensemble[0].args.seed, 123)
                    self.assertEqual(ensemble[1].args.seed, 456)

                    # the task from the first model should be returned
                    self.assertEqual(task.args.seed, 123)

    def test_prune_state_dict(self):
        with contextlib.redirect_stdout(StringIO()):
            extra_args = ["--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01"]
            with self._train_transformer(seed=1, extra_args=extra_args) as model:
                ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
                    filenames=[model],
                    arg_overrides={
                        "encoder_layers_to_keep": "0,2",
                        "decoder_layers_to_keep": "1",
                    },
                )
                self.assertEqual(len(ensemble), 1)
                self.assertEqual(len(ensemble[0].encoder.layers), 2)
                self.assertEqual(len(ensemble[0].decoder.layers), 1)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_checkpoint_utils.py
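Outside the test harness, the same `load_model_ensemble_and_task` call can be used to restore trained checkpoints for inference; `arg_overrides` patches the stored training args before the models are built, which is what makes the layer pruning in `test_prune_state_dict` take effect. A sketch with hypothetical checkpoint paths:

from fairseq import checkpoint_utils

ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    filenames=["/path/to/checkpoint1.pt", "/path/to/checkpoint2.pt"],  # hypothetical paths
    arg_overrides={
        "encoder_layers_to_keep": "0,2",  # keep encoder layers 0 and 2
        "decoder_layers_to_keep": "1",    # keep decoder layer 1
    },
)
for model in ensemble:
    model.eval()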
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from unittest import mock


class TestIOPath(unittest.TestCase):
    def test_no_iopath(self):
        from .test_reproducibility import TestReproducibility

        with mock.patch.dict("sys.modules", {"iopath": None}):
            # reuse reproducibility tests, which are e2e tests that should cover
            # most checkpoint related functionality
            TestReproducibility._test_reproducibility(self, "test_reproducibility")

    def test_no_supports_rename(self):
        from .test_reproducibility import TestReproducibility

        with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn:
            mock_fn.return_value = False
            TestReproducibility._test_reproducibility(self, "test_reproducibility")


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_iopath.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import unittest import numpy as np from fairseq.data import ListDataset, ResamplingDataset class TestResamplingDataset(unittest.TestCase): def setUp(self): self.strings = ["ab", "c", "def", "ghij"] self.weights = [4.0, 2.0, 7.0, 1.5] self.size_ratio = 2 self.dataset = ListDataset( self.strings, np.array([len(s) for s in self.strings]) ) def _test_common(self, resampling_dataset, iters): assert len(self.dataset) == len(self.strings) == len(self.weights) assert len(resampling_dataset) == self.size_ratio * len(self.strings) results = {"ordered_by_size": True, "max_distribution_diff": 0.0} totalfreqs = 0 freqs = collections.defaultdict(int) for epoch_num in range(iters): resampling_dataset.set_epoch(epoch_num) indices = resampling_dataset.ordered_indices() assert len(indices) == len(resampling_dataset) prev_size = -1 for i in indices: cur_size = resampling_dataset.size(i) # Make sure indices map to same sequences within an epoch assert resampling_dataset[i] == resampling_dataset[i] # Make sure length of sequence is correct assert cur_size == len(resampling_dataset[i]) freqs[resampling_dataset[i]] += 1 totalfreqs += 1 if prev_size > cur_size: results["ordered_by_size"] = False prev_size = cur_size assert set(freqs.keys()) == set(self.strings) for s, weight in zip(self.strings, self.weights): freq = freqs[s] / totalfreqs expected_freq = weight / sum(self.weights) results["max_distribution_diff"] = max( results["max_distribution_diff"], abs(expected_freq - freq) ) return results def test_resampling_dataset_batch_by_size_false(self): resampling_dataset = ResamplingDataset( self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=False, seed=0, ) results = self._test_common(resampling_dataset, iters=1000) # For batch_by_size = False, the batches should be returned in # arbitrary order of size. assert not results["ordered_by_size"] # Allow tolerance in distribution error of 2%. assert results["max_distribution_diff"] < 0.02 def test_resampling_dataset_batch_by_size_true(self): resampling_dataset = ResamplingDataset( self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=True, seed=0, ) results = self._test_common(resampling_dataset, iters=1000) # For batch_by_size = True, the batches should be returned in # increasing order of size. assert results["ordered_by_size"] # Allow tolerance in distribution error of 2%. assert results["max_distribution_diff"] < 0.02 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_resampling_dataset.py
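As exercised above, `ResamplingDataset` wraps a base dataset and, per epoch, draws `size_ratio * len(base)` samples with probabilities proportional to the given weights. A minimal sketch using the same constructor arguments as the test (the toy strings and weights are illustrative):

import numpy as np
from fairseq.data import ListDataset, ResamplingDataset

strings = ["ab", "c", "def"]
base = ListDataset(strings, np.array([len(s) for s in strings]))
resampled = ResamplingDataset(
    base, [1.0, 1.0, 3.0], size_ratio=2, batch_by_size=False, seed=0
)

resampled.set_epoch(0)
assert len(resampled) == 2 * len(base)
samples = [resampled[i] for i in resampled.ordered_indices()]
# Across many epochs, "def" should be drawn roughly three times as often
# as each of the other strings.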
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import tests.utils as test_utils import torch from fairseq.data import ( BacktranslationDataset, LanguagePairDataset, TransformEosDataset, ) from fairseq.sequence_generator import SequenceGenerator class TestBacktranslationDataset(unittest.TestCase): def setUp(self): ( self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model, ) = test_utils.sequence_generator_setup() dummy_src_samples = self.src_tokens self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) self.cuda = torch.cuda.is_available() def _backtranslation_dataset_helper( self, remove_eos_from_input_src, remove_eos_from_output_src, ): tgt_dataset = LanguagePairDataset( src=self.tgt_dataset, src_sizes=self.tgt_dataset.sizes, src_dict=self.tgt_dict, tgt=None, tgt_sizes=None, tgt_dict=None, ) generator = SequenceGenerator( [self.model], tgt_dict=self.tgt_dict, max_len_a=0, max_len_b=200, beam_size=2, unk_penalty=0, ) backtranslation_dataset = BacktranslationDataset( tgt_dataset=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # remove eos from the input src remove_eos_from_src=remove_eos_from_input_src, ), src_dict=self.tgt_dict, backtranslation_fn=( lambda sample: generator.generate([self.model], sample) ), output_collater=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # if we remove eos from the input src, then we need to add it # back to the output tgt append_eos_to_tgt=remove_eos_from_input_src, remove_eos_from_src=remove_eos_from_output_src, ).collater, cuda=self.cuda, ) dataloader = torch.utils.data.DataLoader( backtranslation_dataset, batch_size=2, collate_fn=backtranslation_dataset.collater, ) backtranslation_batch_result = next(iter(dataloader)) eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 # Note that we sort by src_lengths and add left padding, so actually # ids will look like: [1, 0] expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) if remove_eos_from_output_src: expected_src = expected_src[:, :-1] expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) generated_src = backtranslation_batch_result["net_input"]["src_tokens"] tgt_tokens = backtranslation_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_backtranslation_dataset_no_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=True, ) def test_backtranslation_dataset_with_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=False, ) def test_backtranslation_dataset_no_eos_in_input_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=True, remove_eos_from_output_src=False, ) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_backtranslation_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import json import os import random import sys import tempfile import unittest from io import StringIO from typing import List, Dict import torch from fairseq import options from fairseq_cli import eval_lm, train, validate from tests.utils import ( create_dummy_data, generate_main, preprocess_lm_data, preprocess_summarization_data, preprocess_translation_data, train_translation_model, ) try: import transformers # noqa has_hf_transformers = True except ImportError: has_hf_transformers = False class TestTranslation(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main(data_dir) def test_raw(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--dataset-impl", "raw"]) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"] ) generate_main(data_dir, ["--dataset-impl", "raw"]) def test_update_freq(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_update_freq") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"] ) generate_main(data_dir) def test_max_positions(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_max_positions") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) with self.assertRaises(Exception) as context: train_translation_model( data_dir, "fconv_iwslt_de_en", ["--max-target-positions", "5"], ) self.assertTrue( "skip this example with --skip-invalid-size-inputs-valid-test" in str(context.exception) ) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--max-target-positions", "5", "--skip-invalid-size-inputs-valid-test", ], ) with self.assertRaises(Exception) as context: generate_main(data_dir) generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"]) def test_generation(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_sampling") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main( data_dir, [ "--sampling", "--temperature", "2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topk", "3", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topp", "0.2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--diversity-rate", "0.5", "--beam", "6", ], ) with self.assertRaises(ValueError): generate_main( data_dir, [ "--diverse-beam-groups", "4", "--match-source-len", ], ) generate_main(data_dir, ["--prefix-size", "2"]) generate_main(data_dir, ["--retain-dropout"]) def test_eval_bleu(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_eval_bleu") as data_dir: create_dummy_data(data_dir) 
preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--eval-bleu", "--eval-bleu-print-samples", "--eval-bleu-remove-bpe", "--eval-bleu-detok", "space", "--eval-bleu-args", '{"beam": 4, "min_len": 10}', ], ) def test_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm_wiseman_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", ], ) generate_main(data_dir) def test_lstm_bidirectional(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm", [ "--encoder-layers", "2", "--encoder-bidirectional", "--encoder-hidden-size", "16", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--decoder-layers", "2", ], ) generate_main(data_dir) def test_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], run_validation=True, ) generate_main(data_dir) def test_multilingual_transformer(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_multilingual_transformer_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch="multilingual_transformer", task="multilingual_translation", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "multilingual_translation", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) @unittest.skipIf( sys.platform.lower() == "darwin", "skip latent depth test on MacOS" ) def test_multilingual_translation_latent_depth(self): # test with latent depth in encoder, decoder, or both encoder_latent_layer = [[], ["--encoder-latent-layer"]] decoder_latent_layer = [[], ["--decoder-latent-layer"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_latent_layer)): for j in range(len(decoder_latent_layer)): if i == 0 and j == 0: continue enc_ll_flag = encoder_latent_layer[i] dec_ll_flag = decoder_latent_layer[j] with tempfile.TemporaryDirectory( f"test_multilingual_translation_latent_depth_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) train_translation_model( 
data_dir, arch="latent_multilingual_transformer", task="multilingual_translation_latent_depth", extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--share-encoders", "--share-decoders", "--sparsity-weight", "0.1", ] + enc_ll_flag + dec_ll_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", ] + enc_ll_flag + dec_ll_flag, ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--task", "multilingual_translation_latent_depth", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ll_flag + dec_ll_flag, ) def test_translation_multi_simple_epoch(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_translation_multi_simple_epoch_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_no_vepoch(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_dicts(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) 
preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_src_tgt_dict_spec(self): # test the specification of explicit --src-dict and --tgt-dict with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=[] ) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--source-dict", f"{data_dir}/dict.in.txt", "--target-dict", f"{data_dir}/dict.out.txt", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_transformer_cross_self_attention(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_cross_self_attention" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-embed-dim", "8", "--no-cross-attention", "--cross-self-attention", ], run_validation=True, ) generate_main(data_dir, extra_flags=[]) def test_transformer_pointer_generator(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_pointer_generator" ) as data_dir: create_dummy_data(data_dir) preprocess_summarization_data(data_dir) train_translation_model( data_dir, "transformer_pointer_generator", extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--alignment-layer", "-1", "--alignment-heads", "1", "--source-position-markers", "0", ], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) def test_lightconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lightconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ "--encoder-conv-type", 
"lightweight", "--decoder-conv-type", "lightweight", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_dynamicconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ "--encoder-conv-type", "dynamic", "--decoder-conv-type", "dynamic", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_cmlm_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "cmlm_transformer", [ "--apply-bert-init", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "0", "--iter-decode-eos-penalty", "0", "--print-step", ], ) # def test_nat_crf_transformer(self): # with contextlib.redirect_stdout(StringIO()): # with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir: # create_dummy_data(data_dir) # preprocess_translation_data(data_dir, ['--joined-dictionary']) # train_translation_model(data_dir, 'nacrf_transformer', [ # '--apply-bert-init', '--criterion', # 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', # '--length-loss-factor', '0.1', # '--word-ins-loss-factor', '0.5', # '--crf-lowrank-approx', '1', # '--crf-beam-approx', '1' # ], task='translation_lev') # generate_main(data_dir, [ # '--task', 'translation_lev', # '--iter-decode-max-iter', '0', # '--iter-decode-eos-penalty', '0', # '--print-step', # ]) def test_iterative_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_iterative_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "iterative_nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--stochastic-approx", "--dae-ratio", "0.5", "--train-step", "3", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_insertion_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "insertion_transformer", [ 
"--apply-bert-init", "--criterion", "nat_loss", "--noise", "random_mask", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_mixture_of_experts(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_moe") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main( data_dir, [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--gen-expert", "0", ], ) def test_alignment(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", ], run_validation=True, ) generate_main(data_dir) def test_alignment_full_context(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", "--full-context-alignment", ], run_validation=True, ) generate_main(data_dir) def test_transformer_layerdrop(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "3", "--decoder-layers", "3", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01", ], ) generate_main(data_dir) generate_main( data_dir, [ "--model-overrides", "{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}", ], ) class TestStories(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_self_att_wp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) config = [ "--encoder-layers", "[(128, 3)] * 2", "--decoder-layers", "[(128, 3)] * 2", "--decoder-attention", "True", "--encoder-attention", "False", "--gated-attention", "True", "--self-attention", "True", "--project-input", "True", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--multihead-self-attention-nheads", "2", ] train_translation_model(data_dir, 
"fconv_self_att_wp", config) generate_main(data_dir) # fusion model os.rename( os.path.join(data_dir, "checkpoint_last.pt"), os.path.join(data_dir, "pretrained.pt"), ) config.extend( [ "--pretrained", "True", "--pretrained-checkpoint", os.path.join(data_dir, "pretrained.pt"), "--save-dir", os.path.join(data_dir, "fusion_model"), ] ) train_translation_model(data_dir, "fconv_self_att_wp", config) class TestLanguageModeling(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "fconv_lm", [ "--decoder-layers", "[(850, 3)] * 2 + [(1024,4)]", "--decoder-embed-dim", "280", "--optimizer", "nag", "--lr", "0.1", ], ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_transformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_transformer_lm_with_adaptive_softmax(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_lm_with_adaptive_softmax" ) as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", [ "--add-bos-token", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", ], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lightconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lightconv_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm_residuals(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token", "--residuals"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) @unittest.skipIf(not has_hf_transformers, "skip test if transformers is missing") def test_transformer_xl_bptt_lm(self): 
with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) task_flags = [ "--user-dir", "examples/truncated_bptt", "--task", "truncated_bptt_lm", "--batch-size", "2", "--tokens-per-sample", "50", ] train_language_model( data_dir=data_dir, arch="transformer_xl", extra_flags=task_flags + [ "--n-layer", "2", ], task="truncated_bptt_lm", run_validation=True, extra_valid_flags=task_flags, ) eval_lm_main(data_dir, extra_flags=task_flags) class TestMaskedLanguageModel(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_legacy_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model(data_dir, "masked_lm") def test_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"] ) def test_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes) def test_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_linformer_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "linformer_roberta_base", extra_flags=[ "--user-dir", "examples/linformer/linformer_src", "--encoder-layers", "2", ], ) def test_linformer_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=["--user-dir", "examples/linformer/linformer_src"], ) def test_linformer_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with 
tempfile.TemporaryDirectory( "test_linformer_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def test_linformer_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_linformer_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model( data_dir, arch="masked_lm", extra_args=("--encoder-learned-pos",) if learned_pos_emb else (), ) with tempfile.TemporaryDirectory( "test_mlm_translation" ) as translation_dir: create_dummy_data(translation_dir) preprocess_translation_data( translation_dir, extra_flags=["--joined-dictionary"] ) # Train transformer with data_dir/checkpoint_last.pt train_translation_model( translation_dir, arch="transformer_from_pretrained_xlm", extra_flags=[ "--decoder-layers", "1", "--decoder-embed-dim", "32", "--decoder-attention-heads", "1", "--decoder-ffn-embed-dim", "32", "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", "--pretrained-xlm-checkpoint", "{}/checkpoint_last.pt".format(data_dir), "--activation-fn", "gelu", "--max-source-positions", "500", "--max-target-positions", "500", ] + ( ["--encoder-learned-pos", "--decoder-learned-pos"] if learned_pos_emb else [] ) + (["--init-encoder-only"] if encoder_only else []), task="translation_from_pretrained_xlm", ) def test_pretrained_masked_lm_for_translation_learned_pos_emb(self): self._test_pretrained_masked_lm_for_translation(True, False) def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self): self._test_pretrained_masked_lm_for_translation(False, False) def test_pretrained_masked_lm_for_translation_encoder_only(self): self._test_pretrained_masked_lm_for_translation(True, True) def test_r4f_roberta(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=[ "--user-dir", "examples/rxf/rxf_src", "--criterion", "sentence_prediction_r3f", "--spectral-norm-classification-head", ], ) def train_legacy_masked_language_model(data_dir, arch, extra_args=()): train_parser = options.get_training_parser() # TODO: langs should be in and out right? 
train_args = options.parse_args_and_arch( train_parser, [ "--task", "cross_lingual_lm", data_dir, "--arch", arch, # Optimizer args "--optimizer", "adam", "--lr-scheduler", "reduce_lr_on_plateau", "--lr-shrink", "0.5", "--lr", "0.0001", "--stop-min-lr", "1e-09", # dropout, attention args "--dropout", "0.1", "--attention-dropout", "0.1", # MLM args "--criterion", "legacy_masked_lm_loss", "--masked-lm-only", "--monolingual-langs", "in,out", "--num-segment", "5", # Transformer args: use a small transformer model for fast training "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", # Other training args "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--dataset-impl", "raw", "--num-workers", "0", ] + list(extra_args), ) train.main(train_args) class TestOptimizers(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_optimizers(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_optimizers") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"] last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt") for optimizer in optimizers: if os.path.exists(last_checkpoint): os.remove(last_checkpoint) train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", optimizer, ], ) generate_main(data_dir) def read_last_log_entry( logs: List[logging.LogRecord], logger_name: str ) -> Dict[str, float]: for x in reversed(logs): if x.name == logger_name: return json.loads(x.message) raise ValueError(f"No entries from {logger_name} found in captured logs") class TestActivationCheckpointing(unittest.TestCase): def test_activation_checkpointing_does_not_change_metrics(self): """--checkpoint-activations should not change loss""" base_flags = [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--restore-file", "x.pt", "--log-format", "json", "--log-interval", "1", "--max-update", "2", ] def _train(extra_flags): with self.assertLogs() as logs: train_translation_model( data_dir, "transformer_iwslt_de_en", base_flags + extra_flags, run_validation=True, extra_valid_flags=["--log-format", "json"], ) return logs.records with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: create_dummy_data(data_dir, num_examples=20) preprocess_translation_data(data_dir) ckpt_logs = _train(["--checkpoint-activations"]) baseline_logs = _train([]) assert len(baseline_logs) == len(ckpt_logs) baseline_train_stats = read_last_log_entry(baseline_logs, "train") ckpt_train_stats = read_last_log_entry(ckpt_logs, "train") assert baseline_train_stats["train_loss"] == ckpt_train_stats["train_loss"] baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") ckpt_valid_stats = read_last_log_entry(ckpt_logs, "valid") assert baseline_valid_stats["valid_loss"] == ckpt_valid_stats["valid_loss"] def create_dummy_roberta_head_data( data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False ): input_dir = "input0" def _create_dummy_data(filename): random_data = 
torch.rand(num_examples * maxlen) input_data = 97 + torch.floor(26 * random_data).int() if regression: output_data = torch.rand((num_examples, num_classes)) else: output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int() with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in: label_filename = filename + ".label" if regression else filename + ".out" with open(os.path.join(data_dir, "label", label_filename), "w") as f_out: offset = 0 for i in range(num_examples): # write example input ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, input_data[offset : offset + ex_len])) print(ex_str, file=f_in) # write example label if regression: class_str = " ".join(map(str, output_data[i].numpy())) print(class_str, file=f_out) else: class_str = "class{}".format(output_data[i]) print(class_str, file=f_out) offset += ex_len os.mkdir(os.path.join(data_dir, input_dir)) os.mkdir(os.path.join(data_dir, "label")) _create_dummy_data("train") _create_dummy_data("valid") _create_dummy_data("test") def train_masked_lm(data_dir, arch, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "masked_lm", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "masked_lm", "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "sentence_prediction", data_dir, "--arch", arch, "--encoder-layers", "2", "--num-classes", str(num_classes), "--optimizer", "adam", "--lr", "0.0001", "--criterion", "sentence_prediction", "--max-tokens", "500", "--max-positions", "500", "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def train_language_model( data_dir, arch, extra_flags=None, run_validation=False, extra_valid_flags=None, task="language_modeling", ): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + (extra_valid_flags or []), ) validate.main(validate_args) def eval_lm_main(data_dir, extra_flags=None): eval_lm_parser = options.get_eval_lm_parser() eval_lm_args = options.parse_args_and_arch( eval_lm_parser, [ data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) eval_lm.main(eval_lm_args) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_binaries.py
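All of these end-to-end tests follow the same pattern as the `train_*` helpers near the bottom of the file: build a CLI parser via `options`, parse a flag list that points at a preprocessed data directory, and hand the result to `train.main`. A condensed sketch of that pattern, where "/tmp/dummy_data" stands in for a hypothetical directory produced by `create_dummy_data` and `preprocess_lm_data`:

from fairseq import options
from fairseq_cli import train

data_dir = "/tmp/dummy_data"  # hypothetical preprocessed data directory
parser = options.get_training_parser()
args = options.parse_args_and_arch(
    parser,
    [
        "--task", "language_modeling", data_dir,
        "--arch", "transformer_lm",
        "--optimizer", "adam", "--lr", "0.0001",
        "--max-tokens", "500", "--tokens-per-sample", "500",
        "--max-epoch", "1",
        "--save-dir", data_dir,
        "--no-progress-bar", "--num-workers", "0",
    ],
)
train.main(args)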
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import unittest import torch from fairseq.token_generation_constraints import * def tensorize(constraints: List[List[int]]) -> torch.Tensor: return [torch.tensor(x) for x in constraints] class TestHelperRoutines(unittest.TestCase): def setUp(self): self.examples = [ ([[]], torch.tensor([[0]])), ([[], []], torch.tensor([[0], [0]])), ([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])), ( [ [ torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7]), ], [], [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])], ], torch.tensor( [ [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0], ] ), ), ] def test_packing(self): """Ensures the list of lists of tensors gets packed correctly.""" for batch_constraints, expected_tensor in self.examples: packed = pack_constraints(batch_constraints) assert torch.equal(packed, expected_tensor) class TestUnorderedConstraintState(unittest.TestCase): def setUp(self): # Tuples of (contraint set, expected printed graph, token counts per node) self.examples = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))", {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1}, ), ([], "[None].False#0", {}), (tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}), ( tensorize([[100000, 1, 2, 3, 4, 5]]), "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))", {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, ), ( tensorize([[1, 2], [1, 2]]), "([None].False#2 ([1].False#2 [2].True#2))", {1: 2, 2: 2}, ), ( tensorize([[1, 2], [3, 4]]), "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))", {1: 1, 2: 1, 3: 1, 4: 1}, ), ] self.sequences = [ ( self.examples[0][0], [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 2, 94], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 3, 999, 1, 4], {"bank": 4, "num_completed": 2, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 3, 999, 1, 4, 999], {"bank": 4, "num_completed": 2, "finished": False, "is_root": True}, ), ( self.examples[0][0], [4, 5, 6, 8], {"bank": 2, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5] # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]], [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( self.examples[0][0], [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": True}, ), ( tensorize([[1], [2, 3]]), # Should not be able to get credit for entering 1 a second time [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[4][0], [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( self.examples[4][0], [1, 2, 1, 2, 1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ( self.examples[5][0], [1, 2, 3, 
4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ] def test_graphs(self): """ Test whether unordered graph systems are created correctly. """ for example in self.examples: constraints, expected, gold_counts = example c = ConstraintNode.create(constraints) assert ( ConstraintNode.print_graph(c) == expected ), f"got {ConstraintNode.print_graph(c)}, expected {expected}" assert ( c.token_counts() == gold_counts ), f"{c} got {c.token_counts()} wanted {gold_counts}" def test_next_tokens(self): """ Tests that the set of next tokens is correct. """ for example in self.examples: constraints, expected, gold_counts = example root = ConstraintNode.create(constraints) root_tokens = set(root.children.keys()) for sequence in constraints: state = UnorderedConstraintState(root) for token in sequence: all_tokens = root_tokens.union(state.node.children.keys()) assert ( all_tokens == state.next_tokens() ), f"ALL {all_tokens} NEXT {state.next_tokens()}" state = state.advance(token) def test_sequences(self): for constraints, tokens, expected in self.sequences: state = UnorderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" class TestOrderedConstraintState(unittest.TestCase): def setUp(self): self.sequences = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 94], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 3, 999, 1, 4], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 999, 999], {"bank": 3, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 77, 1, 3, 1], {"bank": 6, "num_completed": 2, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1], [2, 3]]), [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2, 1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [3, 4]]), [1, 2, 3, 4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ] def test_sequences(self): for i, (constraints, tokens, expected) in enumerate(self.sequences): state = OrderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) 
assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_constraints.py
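A minimal standalone sketch of the packing behaviour the helper test above asserts, assuming fairseq is importable; the input/output pair is copied verbatim from TestHelperRoutines, and the leading entry of each row appears to encode the number of constraints for that sentence.

import torch
from fairseq.token_generation_constraints import pack_constraints

# One batch item with a single constraint [1, 2], one item with none
# (second example in TestHelperRoutines above).
batch_constraints = [[torch.tensor([1, 2])], []]
packed = pack_constraints(batch_constraints)
# Leading column: number of constraints per sentence; then the constraint
# tokens, zero-padded to a common width across the batch.
assert torch.equal(packed, torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]]))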
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import tests.utils as test_utils import torch from fairseq.criterions.cross_entropy import CrossEntropyCriterion from fairseq.criterions.label_smoothed_cross_entropy import ( LabelSmoothedCrossEntropyCriterion, ) class TestLabelSmoothing(unittest.TestCase): def setUp(self): # build dictionary self.d = test_utils.dummy_dictionary(3) vocab = len(self.d) self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens self.assertEqual(self.d.pad(), 1) self.assertEqual(self.d.eos(), 2) self.assertEqual(self.d.unk(), 3) pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841 # build dataset self.data = [ # the first batch item has padding { "source": torch.LongTensor([w1, eos]), "target": torch.LongTensor([w1, eos]), }, { "source": torch.LongTensor([w1, eos]), "target": torch.LongTensor([w1, w1, eos]), }, ] self.sample = next(test_utils.dummy_dataloader(self.data)) # build model self.args = argparse.Namespace() self.args.sentence_avg = False self.args.report_accuracy = False self.args.probs = ( torch.FloatTensor( [ # pad eos unk w1 w2 w3 [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05], [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10], [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15], ] ) .unsqueeze(0) .expand(2, 3, 7) ) # add batch dimension self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d) self.model = self.task.build_model(self.args) def test_nll_loss(self): self.args.label_smoothing = 0.1 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( self.args, self.task ) nll_loss, nll_sample_size, nll_logging_output = nll_crit( self.model, self.sample ) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( self.model, self.sample ) self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6) self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6) def test_padding(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample) def get_one_no_padding(idx): # create a new sample with just a single batch item so that there's # no padding sample1 = next(test_utils.dummy_dataloader([self.data[idx]])) args1 = copy.copy(self.args) args1.probs = args1.probs[idx, :, :].unsqueeze(0) model1 = self.task.build_model(args1) loss1, _, _ = crit(model1, sample1) return loss1 loss1 = get_one_no_padding(0) loss2 = get_one_no_padding(1) self.assertAlmostEqual(loss, loss1 + loss2) def test_reduction(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample, reduce=True) unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False) self.assertAlmostEqual(loss, unreduced_loss.sum()) def test_zero_eps(self): self.args.label_smoothing = 0.0 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( self.args, self.task ) nll_loss, nll_sample_size, nll_logging_output = nll_crit( self.model, self.sample ) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( self.model, self.sample ) self.assertAlmostEqual(nll_loss, smooth_loss) def assertAlmostEqual(self, t1, t2): 
self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-6) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_label_smoothing.py
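For context, a small self-contained sketch of the label-smoothing idea these criterion tests exercise: mix the usual NLL with a uniform penalty over the vocabulary. This is only an illustration of the general recipe, not fairseq's exact LabelSmoothedCrossEntropyCriterion; the function name is hypothetical.

import torch
import torch.nn.functional as F

def label_smoothed_nll(lprobs, target, eps):
    # (1 - eps) * NLL + eps * mean negative log-prob over the vocabulary.
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)
    return ((1.0 - eps) * nll + eps * smooth).sum()

lprobs = F.log_softmax(torch.randn(2, 7), dim=-1)
target = torch.tensor([4, 5])
# With eps == 0 this reduces to plain (summed) NLL, which is the property
# test_zero_eps above checks for the real criterion.
assert torch.allclose(
    label_smoothed_nll(lprobs, target, eps=0.0),
    F.nll_loss(lprobs, target, reduction="sum"),
)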
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import random import sys from io import StringIO import torch import torch.nn.functional as F from fairseq import options, utils from fairseq.data import Dictionary from fairseq.data.language_pair_dataset import collate from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.tasks import LegacyFairseqTask from fairseq_cli import generate, interactive, preprocess, train, validate def dummy_dictionary(vocab_size, prefix="token_"): d = Dictionary() for i in range(vocab_size): token = prefix + str(i) d.add_symbol(token) d.finalize(padding_factor=1) # don't add extra padding symbols return d def dummy_dataloader( samples, padding_idx=1, eos_idx=2, batch_size=None, ): if batch_size is None: batch_size = len(samples) # add any missing data to samples for i, sample in enumerate(samples): if "id" not in sample: sample["id"] = i # create dataloader dataset = TestDataset(samples) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)), ) return iter(dataloader) def sequence_generator_setup(): # construct dummy dictionary d = dummy_dictionary(vocab_size=2) eos = d.eos() w1 = 4 w2 = 5 # construct source data src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0) [0.0, unk, 0.9, 0.1], # w2: 0.1 # sentence 2: [0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25) [0.00, unk, 0.10, 0.9], # w2: 0.3 ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9 [ 0.6, unk, 0.2, 0.2, ], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6) # sentence 2: [ 0.60, unk, 0.4, 0.00, ], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6) [0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9 ] ), # step 3: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0) [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0) # sentence 2: [ 0.1, unk, 0.5, 0.4, ], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1) [ 1.0, unk, 0.0, 0.0, ], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0) ] ), ] task = TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) tgt_dict = task.target_dictionary return tgt_dict, w1, w2, src_tokens, src_lengths, model def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False): def _create_dummy_data(filename): data = torch.rand(num_examples * maxlen) data = 97 + torch.floor(26 * data).int() with open(os.path.join(data_dir, filename), "w") as h: offset = 0 for _ in range(num_examples): ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, data[offset : offset + ex_len])) print(ex_str, file=h) offset += ex_len def _create_dummy_alignment_data(filename_src, filename_tgt, 
filename): with open(os.path.join(data_dir, filename_src), "r") as src_f, open( os.path.join(data_dir, filename_tgt), "r" ) as tgt_f, open(os.path.join(data_dir, filename), "w") as h: for src, tgt in zip(src_f, tgt_f): src_len = len(src.split()) tgt_len = len(tgt.split()) avg_len = (src_len + tgt_len) // 2 num_alignments = random.randint(avg_len // 2, 2 * avg_len) src_indices = torch.floor(torch.rand(num_alignments) * src_len).int() tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int() ex_str = " ".join( [ "{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices) ] ) print(ex_str, file=h) _create_dummy_data("train.in") _create_dummy_data("train.out") _create_dummy_data("valid.in") _create_dummy_data("valid.out") _create_dummy_data("test.in") _create_dummy_data("test.out") if alignment: _create_dummy_alignment_data("train.in", "train.out", "train.align") _create_dummy_alignment_data("valid.in", "valid.out", "valid.align") _create_dummy_alignment_data("test.in", "test.out", "test.align") def preprocess_lm_data(data_dir): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--only-source", "--trainpref", os.path.join(data_dir, "train.out"), "--validpref", os.path.join(data_dir, "valid.out"), "--testpref", os.path.join(data_dir, "test.out"), "--destdir", data_dir, ] ) preprocess.main(preprocess_args) def preprocess_translation_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def preprocess_summarization_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--joined-dictionary", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def train_translation_model( data_dir, arch, extra_flags=None, task="translation", run_validation=False, lang_flags=None, extra_valid_flags=None, ): if lang_flags is None: lang_flags = [ "--source-lang", "in", "--target-lang", "out", ] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--save-dir", data_dir, "--arch", arch, "--optimizer", "nag", "--lr", "0.05", "--max-tokens", "500", "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--num-workers", "0", ] + lang_flags + (extra_flags or []), ) train.main(train_args) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + lang_flags + (extra_valid_flags or []), ) validate.main(validate_args) def generate_main(data_dir, extra_flags=None, path=None): if extra_flags is None: extra_flags = [ "--print-alignment", ] if path is 
None: path = os.path.join(data_dir, "checkpoint_last.pt") generate_parser = options.get_generation_parser() generate_args = options.parse_args_and_arch( generate_parser, [ data_dir, "--path", path, "--beam", "3", "--batch-size", "64", "--max-len-b", "5", "--gen-subset", "valid", "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) # evaluate model in batch mode generate.main(generate_args) # evaluate model interactively generate_args.buffer_size = 0 generate_args.input = "-" generate_args.batch_size = None orig_stdin = sys.stdin sys.stdin = StringIO("h e l l o\n") interactive.main(generate_args) sys.stdin = orig_stdin class TestDataset(torch.utils.data.Dataset): def __init__(self, data): super().__init__() self.data = data self.sizes = None def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) class TestTranslationTask(LegacyFairseqTask): def __init__(self, args, src_dict, tgt_dict, model): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict self.model = model @classmethod def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None): return cls(args, src_dict, tgt_dict, model) def build_model(self, args): return TestModel.build_model(args, self) @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.tgt_dict class TestModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestIncrementalDecoder(FairseqIncrementalDecoder): def __init__(self, args, dictionary): super().__init__(dictionary) assert hasattr(args, "beam_probs") or hasattr(args, "probs") args.max_decoder_positions = getattr(args, "max_decoder_positions", 100) self.args = args def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bbsz = prev_output_tokens.size(0) vocab = len(self.dictionary) src_len = encoder_out.encoder_out.size(1) tgt_len = prev_output_tokens.size(1) # determine number of steps if incremental_state is not None: # cache step number step = utils.get_incremental_state(self, incremental_state, "step") if step is None: step = 0 utils.set_incremental_state(self, incremental_state, "step", step + 1) steps = [step] else: steps = list(range(tgt_len)) # define output in terms of raw probs if hasattr(self.args, "probs"): assert ( self.args.probs.dim() == 3 ), "expected probs to have size bsz*steps*vocab" probs = self.args.probs.index_select(1, torch.LongTensor(steps)) else: probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_() for i, step in enumerate(steps): # args.beam_probs gives the probability for every vocab element, # starting with eos, then unknown, and 
then the rest of the vocab if step < len(self.args.beam_probs): probs[:, i, self.dictionary.eos() :] = self.args.beam_probs[step] else: probs[:, i, self.dictionary.eos()] = 1.0 # random attention attn = torch.rand(bbsz, tgt_len, src_len) dev = prev_output_tokens.device return probs.to(dev), {"attn": [attn.to(dev)]} def get_normalized_probs(self, net_output, log_probs, _): # the decoder returns probabilities directly probs = net_output[0] if log_probs: return probs.log() else: return probs def max_positions(self): return self.args.max_decoder_positions class TestReshapingEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): b_sz, t_sz = src_tokens.shape padding_needed = t_sz % 2 x = src_tokens if padding_needed > 0: padding_needed = 2 - padding_needed x = F.pad(x, (0, padding_needed)) return EncoderOut( encoder_out=x.view(b_sz, -1, 2), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestReshapingModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestReshapingEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestAdditionalInputEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): assert "fancy_other_input" in kwargs assert kwargs["fancy_other_input"] is not None return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestAdditionalInputModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestAdditionalInputEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
import torch.nn as nn
from fairseq.modules import ConvTBC


class TestConvTBC(unittest.TestCase):
    def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(
            output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data
        )

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(
            conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data
        )
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(
            input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data
        )

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_convtbc.py
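The test above hinges on ConvTBC taking (time, batch, channel) input while nn.Conv1d takes (batch, channel, time); a quick pure-torch sketch of that layout conversion, with no fairseq dependency.

import torch

# The same data viewed as TBC (time, batch, channel) for ConvTBC and as
# BCT (batch, channel, time) for nn.Conv1d, mirroring the transposes above.
time, batch, channels = 7, 2, 4
x_tbc = torch.randn(time, batch, channels)
x_bct = x_tbc.transpose(0, 1).transpose(1, 2)  # TBC -> BCT
assert x_bct.shape == (batch, channels, time)
# The conversion is lossless: transposing back recovers the original layout.
assert torch.equal(x_bct.transpose(1, 2).transpose(0, 1), x_tbc)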
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import logging import unittest import torch from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer from omegaconf import OmegaConf @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestGradientScaling(unittest.TestCase): def setUp(self): self.x = torch.tensor([2.0]).cuda().half() weight = 3.0 bias = 5.0 self.error = 1.0 self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half() self.loss_fn = torch.nn.L1Loss() self.model = torch.nn.Linear(1, 1) self.model.weight.data = torch.tensor([[weight]]) self.model.bias.data = torch.tensor([bias]) self.model.cuda().half() self.params = list(self.model.parameters()) self.cfg_dls = OmegaConf.create( { "optimization": { "lr": [0.1], }, "optimizer": { "_name": "adam", "lr": [0.1], "adam_betas": "(0.9, 0.999)", "adam_eps": 1e-8, "weight_decay": 0.0, }, "common": { "fp16_init_scale": 1, "fp16_scale_window": 1, "fp16_scale_tolerance": 1, "threshold_loss_scale": 1, "min_loss_scale": 1e-4, "tpu": False, }, } ) logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def run_iter(self, model, params, optimizer): optimizer.zero_grad() y = model(self.x) loss = self.loss_fn(y, self.target) optimizer.backward(loss) self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16)) grad_norm = optimizer.clip_grad_norm(0) self.assertAlmostEqual(grad_norm.item(), 2.2361, 4) optimizer.step() self.assertEqual( model.weight, torch.tensor( [[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True ), ) self.assertEqual( model.bias, torch.tensor( [5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True ), ) self.assertEqual(optimizer.scaler.loss_scale, 2.0) def test_mixed_precision(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params) self.run_iter(model, params, optimizer) self.assertTrue( all( torch.all( fp32_params.eq( torch.tensor( [3.1000, 5.1000], device="cuda:0", requires_grad=True ) ) ) for fp32_params in optimizer.fp32_params.values() ) ) def test_memory_efficient(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params) self.run_iter(model, params, optimizer) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_fp16_optimizer.py
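run_iter above expects the loss scale to grow from 1.0 to 2.0 after one overflow-free step with fp16_scale_window=1; the toy scaler below sketches dynamic loss scaling consistent with that assertion. It is a hypothetical helper, not fairseq's scaler, and the halve-on-overflow rule is an assumption.

class ToyLossScaler:
    # Sketch of dynamic loss scaling: double the scale after `scale_window`
    # consecutive clean steps, shrink it when gradients overflow (assumed).
    def __init__(self, init_scale=1.0, scale_window=1, scale_factor=2.0):
        self.loss_scale = init_scale
        self.scale_window = scale_window
        self.scale_factor = scale_factor
        self._ok_steps = 0

    def update(self, overflow):
        if overflow:
            self.loss_scale /= self.scale_factor
            self._ok_steps = 0
        else:
            self._ok_steps += 1
            if self._ok_steps % self.scale_window == 0:
                self.loss_scale *= self.scale_factor

scaler = ToyLossScaler(init_scale=1.0, scale_window=1)
scaler.update(overflow=False)
assert scaler.loss_scale == 2.0  # matches the assertion in run_iter above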
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import unittest import torch from fairseq.optim.adam import FairseqAdam from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer from omegaconf import OmegaConf @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestMemoryEfficientFP16(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_load_state_dict(self): # define simple FP16 model model = torch.nn.Linear(5, 5).cuda().half() params = list(model.parameters()) # initialize memory efficient FP16 optimizer # with pseudo DictConfigs optimizer = FairseqAdam( cfg=OmegaConf.create( vars( argparse.Namespace( adam_betas="(0.9, 0.999)", adam_eps=1e-8, weight_decay=0.0, lr=[0.00001], ) ) ), params=params, ) me_optimizer = MemoryEfficientFP16Optimizer( cfg=OmegaConf.create( { "common": vars( argparse.Namespace( fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4, ) ) } ), params=params, optimizer=optimizer, ) # optimizer state is created in the first step loss = model(torch.rand(5).cuda().half()).sum() me_optimizer.backward(loss) me_optimizer.step() # reload state state = me_optimizer.state_dict() me_optimizer.load_state_dict(state) for k, v in me_optimizer.optimizer.state.items(): self.assertTrue(k.dtype == torch.float16) for v_i in v.values(): if torch.is_tensor(v_i): self.assertTrue(v_i.dtype == torch.float32) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_memory_efficient_fp16.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import tests.utils as test_utils import torch from fairseq.data import TokenBlockDataset class TestTokenBlockDataset(unittest.TestCase): def _build_dataset(self, data, **kwargs): sizes = [len(x) for x in data] underlying_ds = test_utils.TestDataset(data) return TokenBlockDataset(underlying_ds, sizes, **kwargs) def test_eos_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [1]) self.assertEqual(ds[2].tolist(), [8, 7, 6, 1]) data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1]) self.assertEqual(ds[2].tolist(), [1]) def test_block_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none") self.assertEqual(ds[0].tolist(), [5, 4, 3]) self.assertEqual(ds[1].tolist(), [2, 1, 8]) self.assertEqual(ds[2].tolist(), [7, 6, 1]) self.assertEqual(ds[3].tolist(), [9, 1]) def test_complete_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset( data, block_size=6, pad=0, eos=1, break_mode="complete" ) self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1]) data = [ torch.tensor([4, 3, 2, 1], dtype=torch.long), torch.tensor([5, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([6, 1], dtype=torch.long), ] ds = self._build_dataset( data, block_size=3, pad=0, eos=1, break_mode="complete" ) self.assertEqual(ds[0].tolist(), [4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [5, 1, 1]) self.assertEqual(ds[2].tolist(), [6, 1]) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_token_block_dataset.py
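A tiny pure-Python sketch of the "none" break mode checked above: the sentences are treated as one token stream and cut into fixed-size blocks, which reproduces the slices test_block_break_mode expects.

# Illustration only: reproduce the block boundaries asserted above for
# break_mode="none" with block_size=3.
data = [[5, 4, 3, 2, 1], [8, 7, 6, 1], [9, 1]]
stream = [tok for sent in data for tok in sent]  # flatten to one token stream
block_size = 3
blocks = [stream[i:i + block_size] for i in range(0, len(stream), block_size)]
assert blocks == [[5, 4, 3], [2, 1, 8], [7, 6, 1], [9, 1]]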
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import math import numpy as np import tests.utils as test_utils import torch from fairseq import search from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.sequence_generator import EnsembleModel, SequenceGenerator from fairseq.ngram_repeat_block import NGramRepeatBlock from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), n=1000) return dummy_dict def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser class TestJitSequenceGeneratorBase(unittest.TestCase): def setUp(self): self.task, self.parser = get_dummy_task_and_parser() eos = self.task.tgt_dict.eos() src_tokens = torch.randint(3, 50, (2, 10)).long() src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1) src_lengths = torch.LongTensor([2, 10]) self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } TransformerModel.add_args(self.parser) args = self.parser.parse_args([]) args.encoder_layers = 2 args.decoder_layers = 1 self.transformer_model = TransformerModel.build_model(args, self.task) def assertOutputEqual(self, hypo, pos_probs): pos_scores = torch.FloatTensor(pos_probs).log() self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores) self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel()) def assertTensorSizeEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) def assertHypoEqual(self, h1, h2): "Check two hypos are equal" self.assertTensorEqual(h1["tokens"], h2["tokens"]) self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"]) self.assertLess(abs(h1["score"] - h2["score"]), 1e-6) self.assertAlmostEqual(h1["attention"], h2["attention"]) def _test_save_and_load(self, scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) JIT_MSG = "Targeting OSS scriptability for the 1.6 release" @unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG) 
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase): def test_export_transformer(self): model = self.transformer_model torch.jit.script(model) def test_ensemble_sequence_generator(self): model = self.transformer_model generator = SequenceGenerator( [model], self.task.tgt_dict, beam_size=2, no_repeat_ngram_size=2, max_len_b=10, ) scripted_model = torch.jit.script(generator) self._test_save_and_load(scripted_model) def test_export_ensemble_model(self): model = self.transformer_model ensemble_models = EnsembleModel([model]) torch.jit.script(ensemble_models) class TestExportSearch(unittest.TestCase): def setUp(self): task, _ = get_dummy_task_and_parser() self.tgt_dict = task.tgt_dict self.min_top1_prob = 0.4 def test_export_diverse_bs(self): search_strategy = search.DiverseBeamSearch( self.tgt_dict, num_groups=2, diversity_strength=0.0 ) torch.jit.script(search_strategy) def test_export_sampling(self): low_sampling_topp = self.min_top1_prob / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=low_sampling_topp ) torch.jit.script(search_strategy) def test_export_diverse_siblings_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) torch.jit.script(search_strategy) class TestSequenceGeneratorBase(unittest.TestCase): def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) class TestSequenceGenerator(TestSequenceGeneratorBase): def setUp(self): ( self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model, ) = test_utils.sequence_generator_setup() self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } def test_with_normalization(self): generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6]) def test_without_normalization(self): # Sentence 1: unchanged from the normalized case # Sentence 2: beams swap order generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, normalize_scores=False ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) 
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False) def test_with_lenpen_favoring_short_hypos(self): lenpen = 0.6 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) def test_with_lenpen_favoring_long_hypos(self): lenpen = 5.0 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen) def test_maxlen(self): generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, max_len_b=2 ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w2, w2, eos]) self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01]) def test_encoder_with_different_output_len(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task( args, self.tgt_dict, self.tgt_dict ) reshaping_model = test_utils.TestReshapingModel.build_model(args, task) generator = SequenceGenerator( [reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2 ) hypos = generator.forward(self.sample) for sent in [0, 1]: for beam in [0, 1]: assert hypos[sent][beam]["attention"] is not None def test_generation_with_additional_input(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task( args, self.tgt_dict, self.tgt_dict ) add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task) generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2) sample = self.sample.copy() sample["net_input"]["fancy_other_input"] = sample["net_input"]["src_tokens"] hypos = 
generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) @unittest.skipUnless(torch.cuda.is_available(), "") class TestRepeatNgramBlocking(TestSequenceGeneratorBase): @classmethod def setUpClass(cls): ( cls.tgt_dict, cls.w1, cls.w2, src_tokens, src_lengths, cls.model, ) = test_utils.sequence_generator_setup() return cls def test_finds_repetitive_tokens(self): bsz, vocab_size, beam_size, step = 2, 4, 1, 3 generated_tok = torch.tensor( [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda" ) lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda") desired_result = lprobs.new_tensor( [[0.0, 0.0, -math.inf, 0.0], [0.0, 0.0, 0.0, -math.inf]] ) cuda_ext_result, baseline_result = self._compare_cuda_ext_to_default_implem( bsz, beam_size, generated_tok, lprobs, step, 2 ) self.assertTensorEqual(cuda_ext_result, desired_result) self.assertTensorEqual(baseline_result, desired_result) @unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG) def test_jit_no_extension(self): bsz, vocab_size, beam_size, step = 2, 4, 1, 3 generated_tok = torch.tensor( [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda" ) lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda") blocker = NGramRepeatBlock(2, use_extension=False) base_result = blocker(generated_tok, lprobs.clone(), bsz, beam_size, step) scripted_blocker = torch.jit.script(blocker) jit_result = scripted_blocker( generated_tok, lprobs.clone(), bsz, beam_size, step ) self.assertTensorEqual(base_result, jit_result) def test_ngram_blocking_same_as_default_implem(self): """Test that cuda extension returns same things as default impl in many settings.""" vocab_size = 4 step = 6 for _ in range(2): block_param = np.random.choice([1, 2, 3, 4]) batch_size = np.random.randint(1, 8) beam_size = np.random.choice([1, 2, 4, 8]) lprobs = torch.zeros((beam_size * batch_size, vocab_size), device="cuda") generated_tok = torch.tensor( np.random.randint( 0, vocab_size, size=(batch_size * beam_size, step + 1) ), device="cuda", dtype=torch.long, ) self._compare_cuda_ext_to_default_implem( batch_size, beam_size, generated_tok, lprobs, step, block_param, ) def _compare_cuda_ext_to_default_implem( self, bsz, beam_size, generated_tok, lprobs, step, block_param ): """Assert that cuda extension and default implem return the same thing.""" blocker = NGramRepeatBlock(block_param) assert blocker.use_extension, "Extension not compiled" cuda_ext_result = blocker( generated_tok, lprobs.clone(), bsz, beam_size, step, ) blocker.use_extension = False baseline_result = blocker( generated_tok, lprobs.clone(), bsz, beam_size, step, ) self.assertTensorEqual(cuda_ext_result, baseline_result) blocker.use_extension = True return cuda_ext_result, baseline_result class TestDiverseBeamSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor( [ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ] ) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ] 
), # step 1: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.6, 0.4], [0.0, unk, 0.6, 0.4], # sentence 2: [0.25, unk, 0.35, 0.4], [0.25, unk, 0.35, 0.4], ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], # sentence 2: [0.9, unk, 0.1, 0.0], [0.9, unk, 0.1, 0.0], ] ), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_diverse_beam_search(self): search_strategy = search.DiverseBeamSearch( self.tgt_dict, num_groups=2, diversity_strength=0.0 ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy, ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9]) class TestDiverseSiblingsSearch(TestDiverseBeamSearch): def assertHypoScore( self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0 ): pos_scores = torch.FloatTensor(pos_probs).log() pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate) self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def test_diverse_beam_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5) class TestTopPSamplingSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor( [ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ] ) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 # The minimal probability of top 2 tokens. self.min_top2_prob = 0.75 # The minimal probability of the top 1 token. 
self.min_top1_prob = 0.4 w1_prob = self.min_top1_prob w2_prob = self.min_top2_prob - self.min_top1_prob eos_prob = 1 - self.min_top2_prob args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], ] ), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_topp_sampling_search_low_prob(self): # Given a prob low enough to top-P sampling, we expect only the top # 1 token to be sampled, which always results in the same output. low_sampling_topp = self.min_top1_prob / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=low_sampling_topp ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1 = self.eos, self.w1 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w1, eos]) self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0]) def test_topp_sampling_search_high_prob(self): # Given a prob high enough to top-P sampling, any of the top 2 # tokens could be sampled. This can cause different outputs. 
high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=high_sampling_topp ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertTrue( self.hypoTokens(hypos[0][0], [w1, w1, eos]) or self.hypoTokens(hypos[0][0], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0]) ) # sentence 1, beam 2 self.assertTrue( self.hypoTokens(hypos[0][1], [w1, w1, eos]) or self.hypoTokens(hypos[0][1], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0]) ) # sentence 2, beam 1 self.assertTrue( self.hypoTokens(hypos[1][0], [w1, w1, eos]) or self.hypoTokens(hypos[1][0], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0]) ) # sentence 2, beam 2 self.assertTrue( self.hypoTokens(hypos[1][1], [w1, w1, eos]) or self.hypoTokens(hypos[1][1], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0]) ) def hypoTokens(self, hypo, tokens): return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() if not self.almostEqual(hypo["positional_scores"], pos_scores): return False if pos_scores.numel() != hypo["tokens"].numel(): return False score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen return abs(score - hypo["score"]) < 1e-6 def almostEqual(self, t1, t2): return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4 def tensorEqual(self, t1, t2): return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_sequence_generator.py
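assertHypoScore above normalizes the summed log-probabilities by length raised to the length penalty; the sketch below restates that scoring rule on the first hypothesis of sentence 1 (illustration only, not the generator's code).

import math

def normalized_score(step_probs, lenpen=1.0):
    # Sum of per-step log-probs divided by length ** lenpen, as in
    # assertHypoScore above.
    log_sum = sum(math.log(p) for p in step_probs)
    return log_sum / (len(step_probs) ** lenpen)

# Hypothesis [w1, eos] of sentence 1, with per-step probabilities 0.9 and 1.0.
score = normalized_score([0.9, 1.0])
assert abs(score - math.log(0.9) / 2) < 1e-6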
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict


class TestConcatDataset(unittest.TestCase):
    def setUp(self):
        d = mock_dict()
        tokens_1 = torch.LongTensor([1]).view(1, -1)
        tokens_ds1 = TokenBlockDataset(
            tokens_1,
            sizes=[tokens_1.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_1 = LanguagePairDataset(
            tokens_ds1, tokens_ds1.sizes, d, shuffle=False
        )
        tokens_2 = torch.LongTensor([2]).view(1, -1)
        tokens_ds2 = TokenBlockDataset(
            tokens_2,
            sizes=[tokens_2.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_2 = LanguagePairDataset(
            tokens_ds2, tokens_ds2.sizes, d, shuffle=False
        )

    def test_concat_dataset_basics(self):
        d = ConcatDataset([self.dataset_1, self.dataset_2])
        assert len(d) == 2
        assert d[0]["source"][0] == 1
        assert d[1]["source"][0] == 2

        d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2])
        assert len(d) == 3
        assert d[0]["source"][0] == 1
        assert d[1]["source"][0] == 2
        assert d[2]["source"][0] == 2

        d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1])
        assert len(d) == 3
        assert d[0]["source"][0] == 1
        assert d[1]["source"][0] == 1
        assert d[2]["source"][0] == 2
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_concat_dataset.py
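The sample_ratios argument asserted above upsamples the constituent datasets; a pure-Python sketch of the resulting item layout, assuming (as the assertions imply) that ratio k contributes k copies of dataset i. concat_with_ratios is a hypothetical helper.

def concat_with_ratios(datasets, sample_ratios):
    # Each dataset is repeated according to its ratio, matching the item
    # order asserted in test_concat_dataset_basics above.
    out = []
    for ds, ratio in zip(datasets, sample_ratios):
        out.extend(list(ds) * ratio)
    return out

assert concat_with_ratios([[1], [2]], [1, 2]) == [1, 2, 2]
assert concat_with_ratios([[1], [2]], [2, 1]) == [1, 1, 2]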
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from fairseq.data import MonolingualDataset
from fairseq.tasks.language_modeling import LanguageModelingTask, LanguageModelingConfig

from tests import utils as test_utils


class TestLMContextWindow(unittest.TestCase):
    def test_eval_dataloader(self):
        dictionary = test_utils.dummy_dictionary(10)
        assert len(dictionary) == 14  # 4 extra special symbols
        assert dictionary.pad() == 1

        dataset = test_utils.TestDataset([
            torch.tensor([4, 5, 6, 7], dtype=torch.long),
            torch.tensor([8, 9, 10, 11], dtype=torch.long),
            torch.tensor([12, 13], dtype=torch.long),
        ])
        dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)

        config = LanguageModelingConfig(tokens_per_sample=4)
        task = LanguageModelingTask(config, dictionary)

        eval_dataloader = task.eval_lm_dataloader(
            dataset=dataset,
            batch_size=1,
            context_window=2,
        )

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1]
        assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1]

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11]
        assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11]

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13]
        assert batch["target"][0].tolist() == [1, 1, 12, 13]


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_lm_context_window.py
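The batches asserted above show how the context window works: tokens carried over from the previous block appear in src_tokens but are replaced by the pad index (1) in the target, presumably so they are ignored during evaluation. A pure-Python restatement of the second batch:

# Second batch from the test above: two context tokens from the previous
# block (6, 7) precede the current block (8, 9, 10, 11).
pad = 1
context = [6, 7]
block = [8, 9, 10, 11]
src_tokens = context + block
target = [pad] * len(context) + block
assert src_tokens == [6, 7, 8, 9, 10, 11]
assert target == [1, 1, 8, 9, 10, 11]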
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import shutil
import sys
import tempfile
import unittest
from typing import Optional
from unittest.mock import MagicMock


class TestFileIO(unittest.TestCase):

    _tmpdir: Optional[str] = None
    _tmpfile: Optional[str] = None
    _tmpfile_contents = "Hello, World"

    @classmethod
    def setUpClass(cls) -> None:
        cls._tmpdir = tempfile.mkdtemp()
        with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
            cls._tmpfile = f.name
            f.write(cls._tmpfile_contents)
            f.flush()

    @classmethod
    def tearDownClass(cls) -> None:
        # Cleanup temp working dir.
        if cls._tmpdir is not None:
            shutil.rmtree(cls._tmpdir)  # type: ignore

    def test_file_io(self):
        from fairseq.file_io import PathManager

        with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
            s = f.read()
            self.assertEqual(s, self._tmpfile_contents)

    def test_file_io_oss(self):
        # Mock fvcore to simulate oss environment.
        sys.modules["fvcore"] = MagicMock()
        from fairseq.file_io import PathManager

        with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
            s = f.read()
            self.assertEqual(s, self._tmpfile_contents)
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_file_io.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import json import os import tempfile import unittest from io import StringIO import torch from . import test_binaries class TestReproducibility(unittest.TestCase): def _test_reproducibility( self, name, extra_flags=None, delta=0.0001, resume_checkpoint="checkpoint1.pt", max_epoch=3, ): def get_last_log_stats_containing_string(log_records, search_string): for log_record in logs.records[::-1]: if isinstance(log_record.msg, str) and search_string in log_record.msg: return json.loads(log_record.msg) if extra_flags is None: extra_flags = [] with tempfile.TemporaryDirectory(name) as data_dir: with self.assertLogs() as logs: test_binaries.create_dummy_data(data_dir) test_binaries.preprocess_translation_data(data_dir) # train epochs 1 and 2 together with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_log = get_last_log_stats_containing_string(logs.records, "train_loss") valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss") # train epoch 2, resuming from previous checkpoint 1 os.rename( os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, "checkpoint_last.pt"), ) with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_res_log = get_last_log_stats_containing_string( logs.records, "train_loss" ) valid_res_log = get_last_log_stats_containing_string( logs.records, "valid_loss" ) for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]: self.assertAlmostEqual( float(train_log[k]), float(train_res_log[k]), delta=delta ) for k in [ "valid_loss", "valid_ppl", "valid_num_updates", "valid_best_loss", ]: self.assertAlmostEqual( float(valid_log[k]), float(valid_res_log[k]), delta=delta ) def test_reproducibility(self): self._test_reproducibility("test_reproducibility") @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_fp16(self): self._test_reproducibility( "test_reproducibility_fp16", [ "--fp16", "--fp16-init-scale", "4096", ], delta=0.011, ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_memory_efficient_fp16(self): self._test_reproducibility( "test_reproducibility_memory_efficient_fp16", [ "--memory-efficient-fp16", "--fp16-init-scale", "4096", ], ) def test_mid_epoch_reproducibility(self): self._test_reproducibility( "test_mid_epoch_reproducibility", ["--save-interval-updates", "3"], resume_checkpoint="checkpoint_1_3.pt", max_epoch=1, ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_reproducibility.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder


class TestCharacterTokenEmbedder(unittest.TestCase):
    def test_character_token_embedder(self):
        vocab = Dictionary()
        vocab.add_symbol("hello")
        vocab.add_symbol("there")

        embedder = CharacterTokenEmbedder(
            vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2
        )

        test_sents = [["hello", "unk", "there"], ["there"], ["hello", "there"]]
        max_len = max(len(s) for s in test_sents)
        input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
        for i in range(len(test_sents)):
            input[i][0] = vocab.eos()
            for j in range(len(test_sents[i])):
                input[i][j + 1] = vocab.index(test_sents[i][j])
            input[i][j + 2] = vocab.eos()
        embs = embedder(input)

        assert embs.size() == (len(test_sents), max_len + 2, 5)
        self.assertAlmostEqual(embs[0][0], embs[1][0])
        self.assertAlmostEqual(embs[0][0], embs[0][-1])
        self.assertAlmostEqual(embs[0][1], embs[2][1])
        self.assertAlmostEqual(embs[0][3], embs[1][1])

        embs.sum().backward()
        assert embedder.char_embeddings.weight.grad is not None

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import os import shutil import tempfile import unittest import numpy as np import torch from scripts.average_checkpoints import average_checkpoints from torch import nn class ModelWithSharedParameter(nn.Module): def __init__(self): super(ModelWithSharedParameter, self).__init__() self.embedding = nn.Embedding(1000, 200) self.FC1 = nn.Linear(200, 200) self.FC2 = nn.Linear(200, 200) # tie weight in FC2 to FC1 self.FC2.weight = nn.Parameter(self.FC1.weight) self.FC2.bias = nn.Parameter(self.FC1.bias) self.relu = nn.ReLU() def forward(self, input): return self.FC2(self.ReLU(self.FC1(input))) + self.FC1(input) class TestAverageCheckpoints(unittest.TestCase): def test_average_checkpoints(self): params_0 = collections.OrderedDict( [ ("a", torch.DoubleTensor([100.0])), ("b", torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])), ("c", torch.IntTensor([7, 8, 9])), ] ) params_1 = collections.OrderedDict( [ ("a", torch.DoubleTensor([1.0])), ("b", torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])), ("c", torch.IntTensor([2, 2, 2])), ] ) params_avg = collections.OrderedDict( [ ("a", torch.DoubleTensor([50.5])), ("b", torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])), # We expect truncation for integer division ("c", torch.IntTensor([4, 5, 5])), ] ) fd_0, path_0 = tempfile.mkstemp() fd_1, path_1 = tempfile.mkstemp() torch.save(collections.OrderedDict([("model", params_0)]), path_0) torch.save(collections.OrderedDict([("model", params_1)]), path_1) output = average_checkpoints([path_0, path_1])["model"] os.close(fd_0) os.remove(path_0) os.close(fd_1) os.remove(path_1) for (k_expected, v_expected), (k_out, v_out) in zip( params_avg.items(), output.items() ): self.assertEqual( k_expected, k_out, "Key mismatch - expected {} but found {}. " "(Expected list of keys: {} vs actual list of keys: {})".format( k_expected, k_out, params_avg.keys(), output.keys() ), ) np.testing.assert_allclose( v_expected.numpy(), v_out.numpy(), err_msg="Tensor value mismatch for key {}".format(k_expected), ) def test_average_checkpoints_with_shared_parameters(self): def _construct_model_with_shared_parameters(path, value): m = ModelWithSharedParameter() nn.init.constant_(m.FC1.weight, value) torch.save({"model": m.state_dict()}, path) return m tmpdir = tempfile.mkdtemp() paths = [] path = os.path.join(tmpdir, "m1.pt") m1 = _construct_model_with_shared_parameters(path, 1.0) paths.append(path) path = os.path.join(tmpdir, "m2.pt") m2 = _construct_model_with_shared_parameters(path, 2.0) paths.append(path) path = os.path.join(tmpdir, "m3.pt") m3 = _construct_model_with_shared_parameters(path, 3.0) paths.append(path) new_model = average_checkpoints(paths) self.assertTrue( torch.equal( new_model["model"]["embedding.weight"], (m1.embedding.weight + m2.embedding.weight + m3.embedding.weight) / 3.0, ) ) self.assertTrue( torch.equal( new_model["model"]["FC1.weight"], (m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0, ) ) self.assertTrue( torch.equal( new_model["model"]["FC2.weight"], (m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0, ) ) shutil.rmtree(tmpdir) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_average_checkpoints.py
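For readers skimming the expected values in test_average_checkpoints above: the averaging arithmetic can be reproduced with a few lines of plain PyTorch. The sketch below only illustrates the behaviour the test encodes, it is not fairseq's scripts.average_checkpoints; note in particular that integer tensors are averaged with truncating integer division, which is why the test expects [4, 5, 5].

import collections
import torch

def average_params(param_dicts):
    """Average a list of state dicts key by key (illustrative sketch only)."""
    avg = collections.OrderedDict()
    n = len(param_dicts)
    for k in param_dicts[0]:
        summed = sum(d[k] for d in param_dicts)
        if summed.is_floating_point():
            avg[k] = summed / n
        else:
            avg[k] = summed // n  # truncating division, matching the IntTensor case in the test
    return avg

p0 = collections.OrderedDict(
    [("a", torch.DoubleTensor([100.0])), ("c", torch.IntTensor([7, 8, 9]))]
)
p1 = collections.OrderedDict(
    [("a", torch.DoubleTensor([1.0])), ("c", torch.IntTensor([2, 2, 2]))]
)
print(average_params([p0, p1]))  # a -> 50.5, c -> [4, 5, 5]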
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.modules import multihead_attention, sinusoidal_positional_embedding from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ Return a dummy task and argument parser, which can be used to create a model/criterion. """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def _test_save_and_load(scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) class TestExportModels(unittest.TestCase): def test_export_multihead_attention(self): module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) scripted = torch.jit.script(module) _test_save_and_load(scripted) def test_incremental_state_multihead_attention(self): module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module1 = torch.jit.script(module1) module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module2 = torch.jit.script(module2) state = {} state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])}) state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])}) v1 = module1.get_incremental_state(state, "key")["a"] v2 = module2.get_incremental_state(state, "key")["a"] self.assertEqual(v1, 1) self.assertEqual(v2, 2) def test_positional_embedding(self): module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding( embedding_dim=8, padding_idx=1 ) scripted = torch.jit.script(module) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_export.py
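The _test_save_and_load helper in test_export.py is a save/load round trip for scripted modules. The same pattern with a plain nn.Linear, independent of fairseq, just to make the mechanics explicit:

import tempfile
import torch
from torch import nn

module = torch.jit.script(nn.Linear(8, 2))         # compile the module to TorchScript
with tempfile.NamedTemporaryFile() as f:
    module.save(f.name)                            # serialize the TorchScript archive
    restored = torch.jit.load(f.name)              # reload it from disk
x = torch.randn(3, 8)
assert torch.allclose(module(x), restored(x))      # identical behaviour after the round trip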
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import OrderedDict import numpy as np import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset from tests.test_train import mock_dict class TestMultiCorpusSampledDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([1]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([2]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def _test_sample_helper( self, expected_sample_from_first_ds_percentage, num_samples=1000, sampling_func=None, ): # To make sure test is not flaky np.random.seed(0) if sampling_func is None: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), ) else: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), sampling_func=sampling_func, ) m.ordered_indices() count_sample_from_first_dataset = 0 for _ in range(num_samples): if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1: count_sample_from_first_dataset += 1 sample_from_first_ds_percentage = ( 1.0 * count_sample_from_first_dataset / num_samples ) self.assertLess( abs( sample_from_first_ds_percentage - expected_sample_from_first_ds_percentage ), 0.01, ) def test_multi_corpus_sampled_dataset_uniform_sample(self): self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5) def test_multi_corpus_sampled_dataset_weighted_sample(self): def naive_weighted_sample(weights): def f(l): v = np.random.random() agg = 0 for i, weight in enumerate(weights): agg += weight if agg > v: return i return f self._test_sample_helper( expected_sample_from_first_ds_percentage=0.9, sampling_func=naive_weighted_sample(weights=[0.9, 0.1]), )
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/test_multi_corpus_sampled_dataset.py
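The naive_weighted_sample helper in the test above is inverse-CDF sampling over a discrete weight vector. A standalone NumPy sketch of the same idea (function names here are illustrative, not part of fairseq):

import numpy as np

def make_weighted_sampler(weights):
    cdf = np.cumsum(weights)
    def sample(keys):
        # return the first key whose cumulative weight exceeds a uniform draw
        return keys[int(np.searchsorted(cdf, np.random.random(), side="right"))]
    return sample

np.random.seed(0)
sampler = make_weighted_sampler([0.9, 0.1])
draws = [sampler([0, 1]) for _ in range(10_000)]
print(sum(d == 0 for d in draws) / len(draws))  # close to 0.9, as the test asserts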
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/gpu/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import os import tempfile import unittest from io import StringIO import torch from fairseq import options from fairseq_cli import train from tests.utils import ( create_dummy_data, generate_main, preprocess_lm_data, preprocess_translation_data, train_translation_model, ) class TestTranslationGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fp16") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en", ["--fp16"]) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_memory_efficient_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_memory_efficient_fp16") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--memory-efficient-fp16"] ) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_transformer_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "64", "--decoder-embed-dim", "64", "--fp16", ], run_validation=True, ) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_levenshtein_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_levenshtein_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "levenshtein_transformer", [ "--apply-bert-init", "--early-exit", "6,6,6", "--criterion", "nat_loss", ], task="translation_lev", ) gen_config = [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ] # non-ensemble generation generate_main(data_dir, gen_config) # ensemble generation generate_main( data_dir, gen_config, path=os.pathsep.join([ os.path.join(data_dir, "checkpoint_last.pt"), os.path.join(data_dir, "checkpoint_last.pt"), ]), ) def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) # try scalar quantization scalar_quant_train_parser = options.get_training_parser() scalar_quant_train_args = options.parse_args_and_arch( scalar_quant_train_parser, [ "--task", 
"language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-update", "3", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", "--quant-noise-scalar", "0.5", ] + (extra_flags or []), ) train.main(scalar_quant_train_args) # try iterative PQ quantization quantize_parser = options.get_training_parser() quantize_args = options.parse_args_and_arch( quantize_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "50", "--tokens-per-sample", "50", "--max-update", "6", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", "--restore-file", os.path.join(data_dir, "checkpoint_last.pt"), "--reset-optimizer", "--quantization-config-path", os.path.join( os.path.dirname(__file__), "transformer_quantization_config.yaml" ), ] + (extra_flags or []), ) train.main(quantize_args) class TestQuantization(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_quantization(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_quantization") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) # tests both scalar and iterative PQ quantization _quantize_language_model(data_dir, "transformer_lm") class TestOptimizersGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_flat_grads(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_flat_grads") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) with self.assertRaises(RuntimeError): # adafactor isn't compatible with flat grads, which # are used by default with --fp16 train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", ], ) # but it should pass once we set --fp16-no-flatten-grads train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", "--fp16-no-flatten-grads", ], ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/gpu/test_binaries_gpu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import sys import unittest import torch from fairseq import distributed_utils as dist_utils from .utils import objects_are_equal, spawn_and_init class TestDistributedUtils(unittest.TestCase): def setUp(self): if not torch.cuda.is_available(): raise unittest.SkipTest("CUDA not available, skipping test") if sys.platform == "win32": raise unittest.SkipTest("NCCL doesn't support Windows, skipping test") if torch.cuda.device_count() < 2: raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping") def test_broadcast_object_python(self): spawn_and_init( functools.partial( TestDistributedUtils._test_broadcast_object, "hello world", ), world_size=2, ) def test_broadcast_object_tensor(self): spawn_and_init( functools.partial( TestDistributedUtils._test_broadcast_object, torch.rand(5), ), world_size=2, ) def test_broadcast_object_complex(self): spawn_and_init( functools.partial( TestDistributedUtils._test_broadcast_object, { "a": "1", "b": [2, torch.rand(2, 3), 3], "c": (torch.rand(2, 3), 4), "d": {5, torch.rand(5)}, "e": torch.rand(5), "f": torch.rand(5).int().cuda(), }, ), world_size=2, ) @staticmethod def _test_broadcast_object(ref_obj, rank, group): obj = dist_utils.broadcast_object( ref_obj if rank == 0 else None, src_rank=0, group=group ) assert objects_are_equal(ref_obj, obj) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/distributed/test_distributed_utils.py
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/distributed/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import tempfile import torch def spawn_and_init(fn, world_size, args=None): if args is None: args = () with tempfile.NamedTemporaryFile(delete=False) as tmp_file: torch.multiprocessing.spawn( fn=functools.partial(init_and_run, fn, args), args=(world_size, tmp_file.name,), nprocs=world_size, ) def distributed_init(rank, world_size, tmp_file): torch.distributed.init_process_group( backend="nccl", init_method="file://{}".format(tmp_file), world_size=world_size, rank=rank, ) torch.cuda.set_device(rank) def init_and_run(fn, args, rank, world_size, tmp_file): distributed_init(rank, world_size, tmp_file) group = torch.distributed.new_group() fn(rank, group, *args) def objects_are_equal(a, b) -> bool: if type(a) is not type(b): return False if isinstance(a, dict): if set(a.keys()) != set(b.keys()): return False for k in a.keys(): if not objects_are_equal(a[k], b[k]): return False return True elif isinstance(a, (list, tuple, set)): if len(a) != len(b): return False return all(objects_are_equal(x, y) for x, y in zip(a, b)) elif torch.is_tensor(a): return ( a.size() == b.size() and a.dtype == b.dtype and a.device == b.device and torch.all(a == b) ) else: return a == b
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/distributed/utils.py
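A quick CPU-only usage sketch of the objects_are_equal helper defined above; it assumes the fairseq repository root is on PYTHONPATH so that tests.distributed.utils is importable:

import torch
from tests.distributed.utils import objects_are_equal

a = {"x": torch.ones(2, 3), "y": [1, 2, (3, 4)]}
b = {"x": torch.ones(2, 3), "y": [1, 2, (3, 4)]}
c = {"x": torch.ones(2, 3), "y": [1, 2, (3, 5)]}
print(objects_are_equal(a, b))  # True: same keys, dtypes, shapes, devices and values
print(objects_are_equal(a, c))  # False: the nested tuple differs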
#!/usr/bin/env python3 import argparse import os import unittest from inspect import currentframe, getframeinfo import numpy as np import torch from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq.data import data_utils as fairseq_data_utils from fairseq.data.dictionary import Dictionary from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, FairseqEncoderDecoderModel, FairseqEncoderModel, FairseqModel, ) from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 # /////////////////////////////////////////////////////////////////////////// # utility function to setup dummy dict/task/input # /////////////////////////////////////////////////////////////////////////// def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.tgt_dict = self.dictionary @property def target_dictionary(self): return self.dictionary def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def get_dummy_input(T=100, D=80, B=5, K=100): forward_input = {} # T max sequence length # D feature vector dimension # B batch size # K target dimension size feature = torch.randn(B, T, D) # this (B, T, D) layout is just a convention, you can override it by # write your own _prepare_forward_input function src_lengths = torch.from_numpy( np.random.randint(low=1, high=T, size=B, dtype=np.int64) ) src_lengths[0] = T # make sure the maximum length matches prev_output_tokens = [] for b in range(B): token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1) tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64) prev_output_tokens.append(torch.from_numpy(tokens)) prev_output_tokens = fairseq_data_utils.collate_tokens( prev_output_tokens, pad_idx=1, eos_idx=2, left_pad=False, move_eos_to_beginning=False, ) src_lengths, sorted_order = src_lengths.sort(descending=True) forward_input["src_tokens"] = feature.index_select(0, sorted_order) forward_input["src_lengths"] = src_lengths forward_input["prev_output_tokens"] = prev_output_tokens return forward_input def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)): """ This only provides an example to generate dummy encoder output """ (T, B, D) = encoder_out_shape encoder_out = {} encoder_out["encoder_out"] = torch.from_numpy( np.random.randn(*encoder_out_shape).astype(np.float32) ) seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B)) # some dummy mask encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand( B, -1 ) >= seq_lengths.view(B, 1).expand(-1, T) encoder_out["encoder_padding_mask"].t_() # encoer_padding_mask is (T, B) tensor, with (t, b)-th element indicate # whether 
encoder_out[t, b] is valid (=0) or not (=1) return encoder_out def _current_postion_info(): cf = currentframe() frameinfo = " (at {}:{})".format( os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno ) return frameinfo def check_encoder_output(encoder_output, batch_size=None): """we expect encoder_output to be a dict with the following key/value pairs: - encoder_out: a Torch.Tensor - encoder_padding_mask: a binary Torch.Tensor """ if not isinstance(encoder_output, dict): msg = ( "FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info() ) return False, msg if "encoder_out" not in encoder_output: msg = ( "FairseqEncoderModel.forward(...) must contain encoder_out" + _current_postion_info() ) return False, msg if "encoder_padding_mask" not in encoder_output: msg = ( "FairseqEncoderModel.forward(...) must contain encoder_padding_mask" + _current_postion_info() ) return False, msg if not isinstance(encoder_output["encoder_out"], torch.Tensor): msg = "encoder_out must be a torch.Tensor" + _current_postion_info() return False, msg if encoder_output["encoder_out"].dtype != torch.float32: msg = "encoder_out must have float32 dtype" + _current_postion_info() return False, msg mask = encoder_output["encoder_padding_mask"] if mask is not None: if not isinstance(mask, torch.Tensor): msg = ( "encoder_padding_mask must be a torch.Tensor" + _current_postion_info() ) return False, msg if mask.dtype != torch.uint8 and ( not hasattr(torch, "bool") or mask.dtype != torch.bool ): msg = ( "encoder_padding_mask must have dtype of uint8" + _current_postion_info() ) return False, msg if mask.dim() != 2: msg = ( "we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)" + _current_postion_info() ) return False, msg if batch_size is not None and mask.size(1) != batch_size: msg = ( "we expect encoder_padding_mask to be a 2-d tensor, with size(1)" + " being the batch size" + _current_postion_info() ) return False, msg return True, None def check_decoder_output(decoder_output): """we expect output from a decoder is a tuple with the following constraint: - the first element is a torch.Tensor - the second element can be anything (reserved for future use) """ if not isinstance(decoder_output, tuple): msg = "FariseqDecoder output must be a tuple" + _current_postion_info() return False, msg if len(decoder_output) != 2: msg = "FairseqDecoder output must be 2-elem tuple" + _current_postion_info() return False, msg if not isinstance(decoder_output[0], torch.Tensor): msg = ( "FariseqDecoder output[0] must be a torch.Tensor" + _current_postion_info() ) return False, msg return True, None # /////////////////////////////////////////////////////////////////////////// # Base Test class # /////////////////////////////////////////////////////////////////////////// class TestBaseFairseqModelBase(unittest.TestCase): """ This class is used to facilitate writing unittest for any class derived from `BaseFairseqModel`. 
""" @classmethod def setUpClass(cls): if cls is TestBaseFairseqModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model): self.assertTrue(isinstance(model, BaseFairseqModel)) self.model = model def setupInput(self): pass def setUp(self): self.model = None self.forward_input = None pass class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase): """ base code to test FairseqEncoderDecoderModel (formally known as `FairseqModel`) must be derived from this base class """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderDecoderModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model_cls, extra_args_setters=None): self.assertTrue( issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)), msg="This class only tests for FairseqModel subclasses", ) task, parser = get_dummy_task_and_parser() model_cls.add_args(parser) args = parser.parse_args([]) if extra_args_setters is not None: for args_setter in extra_args_setters: args_setter(args) model = model_cls.build_model(args, task) self.model = model def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input def setUp(self): super().setUp() def test_forward(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) # for FairseqEncoderDecoderModel, forward returns a tuple of two # elements, the first one is a Torch.Tensor succ, msg = check_decoder_output(forward_output) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output def test_get_normalized_probs(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) logprob = self.model.get_normalized_probs(forward_output, log_probs=True) prob = self.model.get_normalized_probs(forward_output, log_probs=False) # in order for different models/criterion to play with each other # we need to know whether the logprob or prob output is batch_first # or not. We assume an additional attribute will be attached to logprob # or prob. 
If you find your code failed here, simply override # FairseqModel.get_normalized_probs, see example at # https://fburl.com/batch_first_example self.assertTrue(hasattr(logprob, "batch_first")) self.assertTrue(hasattr(prob, "batch_first")) self.assertTrue(torch.is_tensor(logprob)) self.assertTrue(torch.is_tensor(prob)) class TestFairseqEncoderModelBase(TestBaseFairseqModelBase): """ base class to test FairseqEncoderModel """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderModelBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpModel(self, model_cls, extra_args_setters=None): self.assertTrue( issubclass(model_cls, FairseqEncoderModel), msg="This class is only used for testing FairseqEncoderModel", ) task, parser = get_dummy_task_and_parser() model_cls.add_args(parser) args = parser.parse_args([]) if extra_args_setters is not None: for args_setter in extra_args_setters: args_setter(args) model = model_cls.build_model(args, task) self.model = model def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input # get_dummy_input() is originally for s2s, here we delete extra dict # items, so it can be used for EncoderModel / Encoder as well self.forward_input.pop("prev_output_tokens", None) def setUp(self): super().setUp() def test_forward(self): if self.forward_input and self.model: bsz = self.forward_input["src_tokens"].size(0) forward_output = self.model.forward(**self.forward_input) # we expect forward_output to be a dict with the following # key/value pairs: # - encoder_out: a Torch.Tensor # - encoder_padding_mask: a binary Torch.Tensor succ, msg = check_encoder_output(forward_output, batch_size=bsz) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output def test_get_normalized_probs(self): if self.model and self.forward_input: forward_output = self.model.forward(**self.forward_input) logprob = self.model.get_normalized_probs(forward_output, log_probs=True) prob = self.model.get_normalized_probs(forward_output, log_probs=False) # in order for different models/criterion to play with each other # we need to know whether the logprob or prob output is batch_first # or not. We assume an additional attribute will be attached to logprob # or prob. 
If you find your code failed here, simply override # FairseqModel.get_normalized_probs, see example at # https://fburl.com/batch_first_example self.assertTrue(hasattr(logprob, "batch_first")) self.assertTrue(hasattr(prob, "batch_first")) self.assertTrue(torch.is_tensor(logprob)) self.assertTrue(torch.is_tensor(prob)) class TestFairseqEncoderBase(unittest.TestCase): """ base class to test FairseqEncoder """ @classmethod def setUpClass(cls): if cls is TestFairseqEncoderBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpEncoder(self, encoder): self.assertTrue( isinstance(encoder, FairseqEncoder), msg="This class is only used for test FairseqEncoder", ) self.encoder = encoder def setUpInput(self, input=None): self.forward_input = get_dummy_input() if input is None else input # get_dummy_input() is originally for s2s, here we delete extra dict # items, so it can be used for EncoderModel / Encoder as well self.forward_input.pop("prev_output_tokens", None) def setUp(self): self.encoder = None self.forward_input = None def test_forward(self): if self.encoder and self.forward_input: bsz = self.forward_input["src_tokens"].size(0) forward_output = self.encoder.forward(**self.forward_input) succ, msg = check_encoder_output(forward_output, batch_size=bsz) if not succ: self.assertTrue(succ, msg=msg) self.forward_output = forward_output class TestFairseqDecoderBase(unittest.TestCase): """ base class to test FairseqDecoder """ @classmethod def setUpClass(cls): if cls is TestFairseqDecoderBase: raise unittest.SkipTest("Skipping test case in base") super().setUpClass() def setUpDecoder(self, decoder): self.assertTrue( isinstance(decoder, FairseqDecoder), msg="This class is only used for test FairseqDecoder", ) self.decoder = decoder def setUpInput(self, input=None): self.forward_input = get_dummy_encoder_output() if input is None else input def setUpPrevOutputTokens(self, tokens=None): if tokens is None: self.encoder_input = get_dummy_input() self.prev_output_tokens = self.encoder_input["prev_output_tokens"] else: self.prev_output_tokens = tokens def setUp(self): self.decoder = None self.forward_input = None self.prev_output_tokens = None def test_forward(self): if ( self.decoder is not None and self.forward_input is not None and self.prev_output_tokens is not None ): forward_output = self.decoder.forward( prev_output_tokens=self.prev_output_tokens, encoder_out=self.forward_input, ) succ, msg = check_decoder_output(forward_output) if not succ: self.assertTrue(succ, msg=msg) self.forward_input = forward_output class DummyEncoderModel(FairseqEncoderModel): def __init__(self, encoder): super().__init__(encoder) @classmethod def build_model(cls, args, task): return cls(DummyEncoder()) def get_logits(self, net_output): # Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as # F.binary_cross_entropy_with_logits combines sigmoid and CE return torch.log( torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"]) ) def get_normalized_probs(self, net_output, log_probs, sample=None): lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample) lprobs.batch_first = True return lprobs class DummyEncoder(FairseqEncoder): def __init__(self): super().__init__(None) def forward(self, src_tokens, src_lengths): mask, max_len = lengths_to_encoder_padding_mask(src_lengths) return {"encoder_out": src_tokens, "encoder_padding_mask": mask} class CrossEntropyCriterionTestBase(unittest.TestCase): @classmethod def setUpClass(cls): if cls is 
CrossEntropyCriterionTestBase: raise unittest.SkipTest("Skipping base class test case") super().setUpClass() def setUpArgs(self): args = argparse.Namespace() args.sentence_avg = False args.threshold = 0.1 # to use with BinaryCrossEntropyWithLogitsCriterion return args def setUp(self): args = self.setUpArgs() self.model = DummyEncoderModel(encoder=DummyEncoder()) self.criterion = self.criterion_cls.build_criterion(args, task=DummyTask(args)) def get_src_tokens(self, correct_prediction, aggregate): """ correct_prediction: True if the net_output (src_tokens) should predict the correct target aggregate: True if the criterion expects net_output (src_tokens) aggregated across time axis """ predicted_idx = 0 if correct_prediction else 1 if aggregate: src_tokens = torch.zeros((2, 2), dtype=torch.float) for b in range(2): src_tokens[b][predicted_idx] = 1.0 else: src_tokens = torch.zeros((2, 10, 2), dtype=torch.float) for b in range(2): for t in range(10): src_tokens[b][t][predicted_idx] = 1.0 return src_tokens def get_target(self, soft_target): if soft_target: target = torch.zeros((2, 2), dtype=torch.float) for b in range(2): target[b][0] = 1.0 else: target = torch.zeros((2, 10), dtype=torch.long) return target def get_test_sample(self, correct, soft_target, aggregate): src_tokens = self.get_src_tokens(correct, aggregate) target = self.get_target(soft_target) L = src_tokens.size(1) return { "net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])}, "target": target, "ntokens": src_tokens.size(0) * src_tokens.size(1), }
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/speech_recognition/asr_test_base.py
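A small, self-contained illustration of the encoder-output contract that check_encoder_output above enforces: a (T, B, D) float tensor plus a (T, B) boolean padding mask. It assumes the fairseq root, including the examples package that asr_test_base imports, is on PYTHONPATH:

import torch
from tests.speech_recognition.asr_test_base import check_encoder_output

T, B, D = 50, 5, 80
encoder_out = {
    "encoder_out": torch.randn(T, B, D),                           # float32 activations, (T, B, D)
    "encoder_padding_mask": torch.zeros(T, B, dtype=torch.bool),   # (T, B); nothing is padded here
}
ok, msg = check_encoder_output(encoder_out, batch_size=B)
print(ok, msg)  # True None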
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import numpy as np import torch from examples.speech_recognition.data.collaters import Seq2SeqCollater class TestSeq2SeqCollator(unittest.TestCase): def test_collate(self): eos_idx = 1 pad_idx = 0 collater = Seq2SeqCollater( feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx ) # 2 frames in the first sample and 3 frames in the second one frames1 = np.array([[7, 8], [9, 10]]) frames2 = np.array([[1, 2], [3, 4], [5, 6]]) target1 = np.array([4, 2, 3, eos_idx]) target2 = np.array([3, 2, eos_idx]) sample1 = {"id": 0, "data": [frames1, target1]} sample2 = {"id": 1, "data": [frames2, target2]} batch = collater.collate([sample1, sample2]) # collate sort inputs by frame's length before creating the batch self.assertTensorEqual(batch["id"], torch.tensor([1, 0])) self.assertEqual(batch["ntokens"], 7) self.assertTensorEqual( batch["net_input"]["src_tokens"], torch.tensor( [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]] ), ) self.assertTensorEqual( batch["net_input"]["prev_output_tokens"], torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]), ) self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2])) self.assertTensorEqual( batch["target"], torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]), ) self.assertEqual(batch["nsentences"], 2) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/speech_recognition/test_collaters.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from examples.speech_recognition.criterions.cross_entropy_acc import ( CrossEntropyWithAccCriterion, ) from .asr_test_base import CrossEntropyCriterionTestBase class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase): def setUp(self): self.criterion_cls = CrossEntropyWithAccCriterion super().setUp() def test_cross_entropy_all_correct(self): sample = self.get_test_sample(correct=True, soft_target=False, aggregate=False) loss, sample_size, logging_output = self.criterion( self.model, sample, "sum", log_probs=True ) assert logging_output["correct"] == 20 assert logging_output["total"] == 20 assert logging_output["sample_size"] == 20 assert logging_output["ntokens"] == 20 def test_cross_entropy_all_wrong(self): sample = self.get_test_sample(correct=False, soft_target=False, aggregate=False) loss, sample_size, logging_output = self.criterion( self.model, sample, "sum", log_probs=True ) assert logging_output["correct"] == 0 assert logging_output["total"] == 20 assert logging_output["sample_size"] == 20 assert logging_output["ntokens"] == 20
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/speech_recognition/test_cross_entropy.py
#!/usr/bin/env python3 # import models/encoder/decoder to be tested from examples.speech_recognition.models.vggtransformer import ( TransformerDecoder, VGGTransformerEncoder, VGGTransformerModel, vggtransformer_1, vggtransformer_2, vggtransformer_base, ) # import base test class from .asr_test_base import ( DEFAULT_TEST_VOCAB_SIZE, TestFairseqDecoderBase, TestFairseqEncoderBase, TestFairseqEncoderDecoderModelBase, get_dummy_dictionary, get_dummy_encoder_output, get_dummy_input, ) class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase): def setUp(self): def override_config(args): """ vggtrasformer_1 use 14 layers of transformer, for testing purpose, it is too expensive. For fast turn-around test, reduce the number of layers to 3. """ args.transformer_enc_config = ( "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3" ) super().setUp() extra_args_setter = [vggtransformer_1, override_config] self.setUpModel(VGGTransformerModel, extra_args_setter) self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase): def setUp(self): def override_config(args): """ vggtrasformer_2 use 16 layers of transformer, for testing purpose, it is too expensive. For fast turn-around test, reduce the number of layers to 3. """ args.transformer_enc_config = ( "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3" ) super().setUp() extra_args_setter = [vggtransformer_2, override_config] self.setUpModel(VGGTransformerModel, extra_args_setter) self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase): def setUp(self): def override_config(args): """ vggtrasformer_base use 12 layers of transformer, for testing purpose, it is too expensive. For fast turn-around test, reduce the number of layers to 3. """ args.transformer_enc_config = ( "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3" ) super().setUp() extra_args_setter = [vggtransformer_base, override_config] self.setUpModel(VGGTransformerModel, extra_args_setter) self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE)) class VGGTransformerEncoderTest(TestFairseqEncoderBase): def setUp(self): super().setUp() self.setUpInput(get_dummy_input(T=50, D=80, B=5)) def test_forward(self): print("1. test standard vggtransformer") self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80)) super().test_forward() print("2. test vggtransformer with limited right context") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(-1, 5) ) ) super().test_forward() print("3. test vggtransformer with limited left context") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(5, -1) ) ) super().test_forward() print("4. test vggtransformer with limited right context and sampling") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(-1, 12), transformer_sampling=(2, 2), ) ) super().test_forward() print("5. 
test vggtransformer with windowed context and sampling") self.setUpEncoder( VGGTransformerEncoder( input_feat_per_channel=80, transformer_context=(12, 12), transformer_sampling=(2, 2), ) ) class TransformerDecoderTest(TestFairseqDecoderBase): def setUp(self): super().setUp() dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE) decoder = TransformerDecoder(dict) dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256)) self.setUpDecoder(decoder) self.setUpInput(dummy_encoder_output) self.setUpPrevOutputTokens()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/speech_recognition/test_vggtransformer.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from examples.speech_recognition.data import data_utils class DataUtilsTest(unittest.TestCase): def test_normalization(self): sample_len1 = torch.tensor( [ [ -0.7661, -1.3889, -2.0972, -0.9134, -0.7071, -0.9765, -0.8700, -0.8283, 0.7512, 1.3211, 2.1532, 2.1174, 1.2800, 1.2633, 1.6147, 1.6322, 2.0723, 3.1522, 3.2852, 2.2309, 2.5569, 2.2183, 2.2862, 1.5886, 0.8773, 0.8725, 1.2662, 0.9899, 1.1069, 1.3926, 1.2795, 1.1199, 1.1477, 1.2687, 1.3843, 1.1903, 0.8355, 1.1367, 1.2639, 1.4707, ] ] ) out = data_utils.apply_mv_norm(sample_len1) assert not torch.isnan(out).any() assert (out == sample_len1).all()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/speech_recognition/test_data_utils.py
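apply_mv_norm, exercised above on a single-frame utterance, performs per-utterance mean-variance normalization of the features. Below is a conceptual sketch in plain PyTorch; it is not fairseq's implementation, and a naive version like this yields NaNs for a single frame (the standard deviation of one sample is undefined), which is exactly the edge case the test guards against by expecting the input to pass through unchanged.

import torch

def mv_norm(feats, eps=1e-8):
    # feats: (T, D) utterance features; normalize each feature dim over time
    mean = feats.mean(dim=0, keepdim=True)
    std = feats.std(dim=0, keepdim=True)
    return (feats - mean) / (std + eps)

x = torch.randn(100, 40)
y = mv_norm(x)
print(y.mean(dim=0).abs().max().item())  # close to 0
print(y.std(dim=0).mean().item())        # close to 1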
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/tests/speech_recognition/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ A modified version of the legacy DistributedDataParallel module that uses c10d communication primitives. This version is simpler than the latest PyTorch version and is useful for debugging. Notably it does not overlap gradient communication with the backward pass, which makes it slower but more robust than the PyTorch version. This version also supports the *no_sync* context manager, which allows faster training with `--update-freq`. """ import copy from collections import OrderedDict from contextlib import contextmanager import torch from torch import nn from torch.autograd import Variable from . import distributed_utils class LegacyDistributedDataParallel(nn.Module): """Implements distributed data parallelism at the module level. A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`. This version uses a c10d process group for communication and does not broadcast buffers. Args: module (~torch.nn.Module): module to be parallelized process_group: the c10d process group to be used for distributed data parallel all-reduction. buffer_size (int, optional): number of elements to buffer before performing all-reduce (default: 256M). """ def __init__(self, module, process_group, buffer_size=2 ** 28): super().__init__() self.module = module self.process_group = process_group self.world_size = distributed_utils.get_world_size(self.process_group) # Never use a bigger buffer than the number of model params self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters())) self.buffer = None # We can also forcibly accumulate grads locally and only do the # all-reduce at some later time self.accumulate_grads = False # make per-device lists of parameters paramlists = OrderedDict() for param in self.module.parameters(): device = param.device if paramlists.get(device) is None: paramlists[device] = [] paramlists[device] += [param] self.per_device_params = list(paramlists.values()) def __getstate__(self): attrs = copy.copy(self.__dict__) return attrs def __setstate__(self, state): super().__setstate__(state) @contextmanager def no_sync(self): """A context manager to disable gradient synchronization.""" old_accumulate_grads = self.accumulate_grads self.accumulate_grads = True yield self.accumulate_grads = old_accumulate_grads def forward(self, *inputs, **kwargs): return self.module(*inputs, **kwargs) def all_reduce_grads(self): """ This function must be called explicitly after backward to reduce gradients. There is no automatic hook like c10d. 
""" def all_reduce_params(params): buffer = self.buffer nonzero_buffer = False if len(params) > 1: offset = 0 for p in params: sz = p.numel() if p.grad is not None: buffer[offset : offset + sz].copy_(p.grad.data.view(-1)) nonzero_buffer = True else: buffer[offset : offset + sz].zero_() offset += sz else: # we only have a single grad to all-reduce p = params[0] if p.grad is not None: buffer = p.grad.data nonzero_buffer = True elif p.numel() <= self.buffer.numel(): buffer = buffer[: p.numel()] buffer.zero_() else: buffer = torch.zeros_like(p) if nonzero_buffer: buffer.div_(self.world_size) distributed_utils.all_reduce(buffer, self.process_group) # copy all-reduced grads back into their original place offset = 0 for p in params: sz = p.numel() if p.grad is not None: p.grad.data.copy_(buffer[offset : offset + sz].view_as(p)) else: p.grad = buffer[offset : offset + sz].view_as(p).clone() offset += sz def reduction_fn(): # This function only needs to be called once if self.accumulate_grads: return if self.buffer is None: self.buffer = next(self.module.parameters()).new(self.buffer_size) for params in self.per_device_params: # All-reduce the gradients in buckets offset = 0 buffered_params = [] for param in params: if not param.requires_grad: continue if param.grad is None: param.grad = torch.zeros_like(param) if param.grad.requires_grad: raise RuntimeError( "DistributedDataParallel only works " "with gradients that don't require " "grad" ) sz = param.numel() if sz > self.buffer.numel(): # all-reduce big params directly all_reduce_params([param]) else: if offset + sz > self.buffer.numel(): all_reduce_params(buffered_params) offset = 0 buffered_params.clear() buffered_params.append(param) offset += sz if len(buffered_params) > 0: all_reduce_params(buffered_params) reduction_fn()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/legacy_distributed_data_parallel.py
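A hedged, single-process sketch of the call pattern this module expects: accumulate gradients locally under no_sync (as fairseq does for --update-freq), run backward on the final micro-batch, then call all_reduce_grads explicitly, since there is no automatic c10d hook. It assumes the gloo backend is available and uses world_size 1 purely so the example can run on one CPU process; treat it as illustrative, not as the fairseq trainer's real wiring.

import tempfile
import torch
import torch.distributed as dist
from torch import nn
from fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel

# single-process "distributed" setup, only to obtain a process group
tmp = tempfile.NamedTemporaryFile(delete=False)
dist.init_process_group("gloo", init_method="file://" + tmp.name, rank=0, world_size=1)

model = LegacyDistributedDataParallel(nn.Linear(8, 2), process_group=dist.new_group())

batches = [torch.randn(4, 8) for _ in range(3)]
with model.no_sync():                    # accumulate grads locally for the first micro-batches
    for x in batches[:-1]:
        model(x).sum().backward()
model(batches[-1]).sum().backward()      # final micro-batch of this update
model.all_reduce_grads()                 # explicit all-reduce across workers (a no-op with one process)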
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from typing import Callable, List, Optional import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, EvalLMConfig, GenerationConfig, InteractiveConfig, OptimizationConfig, ) from fairseq.dataclass.utils import gen_parser_from_dataclass # this import is for backward compatibility from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa def get_preprocessing_parser(default_task="translation"): parser = get_parser("Preprocessing", default_task) add_preprocess_args(parser) return parser def get_training_parser(default_task="translation"): parser = get_parser("Trainer", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) return parser def get_generation_parser(interactive=False, default_task="translation"): parser = get_parser("Generation", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_generation_args(parser) add_checkpoint_args(parser) if interactive: add_interactive_args(parser) return parser def get_interactive_generation_parser(default_task="translation"): return get_generation_parser(interactive=True, default_task=default_task) def get_eval_lm_parser(default_task="language_modeling"): parser = get_parser("Evaluate Language Model", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_eval_lm_args(parser) return parser def get_validation_parser(default_task=None): parser = get_parser("Validation", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser, default_world_size=1) group = parser.add_argument_group("Evaluation") gen_parser_from_dataclass(group, CommonEvalConfig()) return parser def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, ): """ Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values """ if suppress_defaults: # Parse args without any default values. This requires us to parse # twice, once to identify all the necessary task/model args, and a second # time with all defaults set to None. 
args = parse_args_and_arch( parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False, ) suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()}) args = suppressed_parser.parse_args(input_args) return argparse.Namespace( **{k: v for k, v in vars(args).items() if v is not None} ) from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args(input_args) utils.import_user_module(usr_args) if modify_parser is not None: modify_parser(parser) # The parser doesn't know about model/criterion/optimizer-specific args, so # we parse twice. First we parse the model/criterion/optimizer, then we # parse a second time after adding the *-specific arguments. # If input_args is given, we will parse those args instead of sys.argv. args, _ = parser.parse_known_args(input_args) # Add model-specific args to parser. if hasattr(args, "arch"): model_specific_group = parser.add_argument_group( "Model-specific configuration", # Only include attributes which are explicitly given as command-line # arguments or which have default values. argument_default=argparse.SUPPRESS, ) if args.arch in ARCH_MODEL_REGISTRY: ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) elif args.arch in MODEL_REGISTRY: MODEL_REGISTRY[args.arch].add_args(model_specific_group) else: raise RuntimeError() if hasattr(args, "task"): from fairseq.tasks import TASK_REGISTRY TASK_REGISTRY[args.task].add_args(parser) if getattr(args, "use_bmuf", False): # hack to support extra args for block distributed data parallelism from fairseq.optim.bmuf import FairseqBMUF FairseqBMUF.add_args(parser) # Add *-specific args to parser. from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): choice = getattr(args, registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] if hasattr(cls, "add_args"): cls.add_args(parser) elif hasattr(cls, "__dataclass"): gen_parser_from_dataclass(parser, cls.__dataclass()) # Modify the parser a second time, since defaults may have been reset if modify_parser is not None: modify_parser(parser) # Parse a second time. if parse_known: args, extra = parser.parse_known_args(input_args) else: args = parser.parse_args(input_args) extra = None # Post-process args. if ( hasattr(args, "batch_size_valid") and args.batch_size_valid is None ) or not hasattr(args, "batch_size_valid"): args.batch_size_valid = args.batch_size if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None: args.max_tokens_valid = args.max_tokens if getattr(args, "memory_efficient_fp16", False): args.fp16 = True if getattr(args, "memory_efficient_bf16", False): args.bf16 = True args.tpu = getattr(args, "tpu", False) args.bf16 = getattr(args, "bf16", False) if args.bf16: args.tpu = True if args.tpu and args.fp16: raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs") if getattr(args, "seed", None) is None: args.seed = 1 # default seed for training args.no_seed_provided = True else: args.no_seed_provided = False # Apply architecture configuration. 
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY: ARCH_CONFIG_REGISTRY[args.arch](args) if parse_known: return args, extra else: return args def get_parser(desc, default_task="translation"): # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args() utils.import_user_module(usr_args) parser = argparse.ArgumentParser(allow_abbrev=False) gen_parser_from_dataclass(parser, CommonConfig()) from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): parser.add_argument( "--" + registry_name.replace("_", "-"), default=REGISTRY["default"], choices=REGISTRY["registry"].keys(), ) # Task definitions can be found under fairseq/tasks/ from fairseq.tasks import TASK_REGISTRY parser.add_argument( "--task", metavar="TASK", default=default_task, choices=TASK_REGISTRY.keys(), help="task", ) # fmt: on return parser def add_preprocess_args(parser): group = parser.add_argument_group("Preprocessing") # fmt: off group.add_argument("-s", "--source-lang", default=None, metavar="SRC", help="source language") group.add_argument("-t", "--target-lang", default=None, metavar="TARGET", help="target language") group.add_argument("--trainpref", metavar="FP", default=None, help="train file prefix (also used to build dictionaries)") group.add_argument("--validpref", metavar="FP", default=None, help="comma separated, valid file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--testpref", metavar="FP", default=None, help="comma separated, test file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--align-suffix", metavar="FP", default=None, help="alignment file suffix") group.add_argument("--destdir", metavar="DIR", default="data-bin", help="destination dir") group.add_argument("--thresholdtgt", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--thresholdsrc", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--tgtdict", metavar="FP", help="reuse given target dictionary") group.add_argument("--srcdict", metavar="FP", help="reuse given source dictionary") group.add_argument("--nwordstgt", metavar="N", default=-1, type=int, help="number of target words to retain") group.add_argument("--nwordssrc", metavar="N", default=-1, type=int, help="number of source words to retain") group.add_argument("--alignfile", metavar="ALIGN", default=None, help="an alignment file (optional)") parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument("--joined-dictionary", action="store_true", help="Generate joined dictionary") group.add_argument("--only-source", action="store_true", help="Only process the source language") group.add_argument("--padding-factor", metavar="N", default=8, type=int, help="Pad dictionary size to be multiple of N") group.add_argument("--workers", metavar="N", default=1, type=int, help="number of parallel workers") # fmt: on return parser def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group("dataset_data_loading") gen_parser_from_dataclass(group, DatasetConfig()) # 
fmt: on return group def add_distributed_training_args(parser, default_world_size=None): group = parser.add_argument_group("distributed_training") if default_world_size is None: default_world_size = max(1, torch.cuda.device_count()) gen_parser_from_dataclass( group, DistributedTrainingConfig(distributed_world_size=default_world_size) ) return group def add_optimization_args(parser): group = parser.add_argument_group("optimization") # fmt: off gen_parser_from_dataclass(group, OptimizationConfig()) # fmt: on return group def add_checkpoint_args(parser): group = parser.add_argument_group("checkpoint") # fmt: off gen_parser_from_dataclass(group, CheckpointConfig()) # fmt: on return group def add_common_eval_args(group): gen_parser_from_dataclass(group, CommonEvalConfig()) def add_eval_lm_args(parser): group = parser.add_argument_group("LM Evaluation") add_common_eval_args(group) gen_parser_from_dataclass(group, EvalLMConfig()) def add_generation_args(parser): group = parser.add_argument_group("Generation") add_common_eval_args(group) gen_parser_from_dataclass(group, GenerationConfig()) return group def add_interactive_args(parser): group = parser.add_argument_group("Interactive") gen_parser_from_dataclass(group, InteractiveConfig()) def add_model_args(parser): group = parser.add_argument_group("Model configuration") # fmt: off # Model definitions can be found under fairseq/models/ # # The model architecture can be specified in several ways. # In increasing order of priority: # 1) model defaults (lowest priority) # 2) --arch argument # 3) --encoder/decoder-* arguments (highest priority) from fairseq.models import ARCH_MODEL_REGISTRY group.add_argument('--arch', '-a', metavar='ARCH', choices=ARCH_MODEL_REGISTRY.keys(), help='model architecture') # fmt: on return group
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/options.py
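A minimal usage sketch for the option helpers in the file above, assuming fairseq is importable; the data path and architecture name are illustrative placeholders. get_parser builds the base parser, the add_*_args helpers attach the grouped options, and parse_args_and_arch resolves architecture defaults (e.g. encoder_embed_dim) through ARCH_CONFIG_REGISTRY.

from fairseq import options

# Compose a training-style parser from the helpers defined above.
parser = options.get_parser("Trainer", default_task="translation")
options.add_dataset_args(parser, train=True)
options.add_distributed_training_args(parser)
options.add_model_args(parser)
options.add_optimization_args(parser)
options.add_checkpoint_args(parser)

# Parse an illustrative command line; the positional data path does not need
# to exist for argument parsing itself.
args = options.parse_args_and_arch(
    parser,
    input_args=["data-bin/iwslt14.de-en", "--arch", "transformer_iwslt_de_en"],
)

# Architecture defaults have been filled in by ARCH_CONFIG_REGISTRY[args.arch].
print(args.arch, args.encoder_embed_dim)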
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import namedtuple import numpy as np import torch from fairseq import utils DecoderOut = namedtuple( "IterativeRefinementDecoderOut", ["output_tokens", "output_scores", "attn", "step", "max_step", "history"], ) class IterativeRefinementGenerator(object): def __init__( self, tgt_dict, models=None, eos_penalty=0.0, max_iter=10, max_ratio=2, beam_size=1, decoding_format=None, retain_dropout=False, adaptive=True, retain_history=False, reranking=False, ): """ Generates translations based on iterative refinement. Args: tgt_dict: target dictionary eos_penalty: if > 0.0, it penalized early-stopping in decoding max_iter: maximum number of refinement iterations max_ratio: generate sequences of maximum length ax, where x is the source length decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'} retain_dropout: retaining dropout in the inference adaptive: decoding with early stop """ self.bos = tgt_dict.bos() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.eos_penalty = eos_penalty self.max_iter = max_iter self.max_ratio = max_ratio self.beam_size = beam_size self.reranking = reranking self.decoding_format = decoding_format self.retain_dropout = retain_dropout self.retain_history = retain_history self.adaptive = adaptive self.models = models def generate_batched_itr( self, data_itr, maxlen_a=None, maxlen_b=None, cuda=False, timer=None, prefix_size=0, ): """Iterate over a batched dataset and yield individual translations. Args: maxlen_a/b: generate sequences of maximum length ax + b, where x is the source sentence length. cuda: use GPU for generation timer: StopwatchMeter for timing generations. """ for sample in data_itr: if "net_input" not in sample: continue if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate( self.models, sample, prefix_tokens=sample["target"][:, :prefix_size] if prefix_size > 0 else None, ) if timer is not None: timer.stop(sample["ntokens"]) for i, id in enumerate(sample["id"]): # remove padding src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad) ref = utils.strip_pad(sample["target"][i, :], self.pad) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample, prefix_tokens=None, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the IterativeRefinementGenerator is not supported" ) # TODO: iterative refinement generator does not support ensemble for now. if not self.retain_dropout: for model in models: model.eval() model, reranker = models[0], None if self.reranking: assert len(models) > 1, "Assuming the last checkpoint is the reranker" assert ( self.beam_size > 1 ), "Reranking requires multiple translation for each example" reranker = models[-1] models = models[:-1] if len(models) > 1 and hasattr(model, "enable_ensemble"): assert model.allow_ensemble, "{} does not support ensembling".format( model.__class__.__name__ ) model.enable_ensemble(models) # TODO: better encoder inputs? 
src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size() # initialize encoder_out = model.forward_encoder([src_tokens, src_lengths]) prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens) if self.beam_size > 1: assert ( model.allow_length_beam ), "{} does not support decoding with length beam.".format( model.__class__.__name__ ) # regenerate data based on length-beam length_beam_order = ( utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1) ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, length_beam_order ) prev_decoder_out = model.regenerate_length_beam( prev_decoder_out, self.beam_size ) bsz = bsz * self.beam_size sent_idxs = torch.arange(bsz) prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.retain_history: prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens]) finalized = [[] for _ in range(bsz)] def is_a_loop(x, y, s, a): b, l_x, l_y = x.size(0), x.size(1), y.size(1) if l_x > l_y: y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1) s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1) if a is not None: a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1) elif l_x < l_y: x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1) return (x == y).all(1), y, s, a def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn): cutoff = prev_out_token.ne(self.pad) tokens = prev_out_token[cutoff] if prev_out_score is None: scores, score = None, None else: scores = prev_out_score[cutoff] score = scores.mean() if prev_out_attn is None: hypo_attn, alignment = None, None else: hypo_attn = prev_out_attn[cutoff] alignment = hypo_attn.max(dim=1)[1] return { "steps": step, "tokens": tokens, "positional_scores": scores, "score": score, "hypo_attn": hypo_attn, "alignment": alignment, } for step in range(self.max_iter + 1): decoder_options = { "eos_penalty": self.eos_penalty, "max_ratio": self.max_ratio, "decoding_format": self.decoding_format, } prev_decoder_out = prev_decoder_out._replace( step=step, max_step=self.max_iter + 1, ) decoder_out = model.forward_decoder( prev_decoder_out, encoder_out, **decoder_options ) if self.adaptive: # terminate if there is a loop terminated, out_tokens, out_scores, out_attn = is_a_loop( prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn, ) decoder_out = decoder_out._replace( output_tokens=out_tokens, output_scores=out_scores, attn=out_attn, ) else: terminated = decoder_out.output_tokens.new_zeros( decoder_out.output_tokens.size(0) ).bool() if step == self.max_iter: # reach last iteration, terminate terminated.fill_(1) # collect finalized sentences finalized_idxs = sent_idxs[terminated] finalized_tokens = decoder_out.output_tokens[terminated] finalized_scores = decoder_out.output_scores[terminated] finalized_attn = ( None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated] ) if self.retain_history: finalized_history_tokens = [h[terminated] for h in decoder_out.history] for i in range(finalized_idxs.size(0)): finalized[finalized_idxs[i]] = [ finalized_hypos( step, finalized_tokens[i], finalized_scores[i], None if finalized_attn is None else finalized_attn[i], ) ] if self.retain_history: finalized[finalized_idxs[i]][0]["history"] = [] for j in range(len(finalized_history_tokens)): finalized[finalized_idxs[i]][0]["history"].append( finalized_hypos( step, finalized_history_tokens[j][i], None, None ) ) 
# check if all terminated if terminated.sum() == terminated.size(0): break # for next step not_terminated = ~terminated prev_decoder_out = decoder_out._replace( output_tokens=decoder_out.output_tokens[not_terminated], output_scores=decoder_out.output_scores[not_terminated], attn=decoder_out.attn[not_terminated] if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0) else None, history=[h[not_terminated] for h in decoder_out.history] if decoder_out.history is not None else None, ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, not_terminated.nonzero(as_tuple=False).squeeze() ) sent_idxs = sent_idxs[not_terminated] prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.beam_size > 1: if reranker is not None: finalized = self.rerank( reranker, finalized, [src_tokens, src_lengths], self.beam_size ) # aggregate information from length beam finalized = [ finalized[ np.argmax( [ finalized[self.beam_size * i + j][0]["score"] for j in range(self.beam_size) ] ) + self.beam_size * i ] for i in range(len(finalized) // self.beam_size) ] return finalized def rerank(self, reranker, finalized, encoder_input, beam_size): def rebuild_batch(finalized): finalized_tokens = [f[0]["tokens"] for f in finalized] finalized_maxlen = max(f.size(0) for f in finalized_tokens) final_output_tokens = ( finalized_tokens[0] .new_zeros(len(finalized_tokens), finalized_maxlen) .fill_(self.pad) ) for i, f in enumerate(finalized_tokens): final_output_tokens[i, : f.size(0)] = f return final_output_tokens final_output_tokens = rebuild_batch(finalized) final_output_tokens[ :, 0 ] = self.eos # autoregressive model assumes starting with EOS reranker_encoder_out = reranker.encoder(*encoder_input) length_beam_order = ( utils.new_arange( final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1) ) .t() .reshape(-1) ) reranker_encoder_out = reranker.encoder.reorder_encoder_out( reranker_encoder_out, length_beam_order ) reranking_scores = reranker.get_normalized_probs( reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None, ) reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None]) reranking_masks = final_output_tokens[:, 1:].ne(self.pad) reranking_scores = ( reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1) ) reranking_scores = reranking_scores / reranking_masks.sum(1).type_as( reranking_scores ) for i in range(len(finalized)): finalized[i][0]["score"] = reranking_scores[i] return finalized
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/iterative_refinement_generator.py
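A hedged sketch of driving IterativeRefinementGenerator from the file above. `models`, `tgt_dict`, and `sample` are assumed to come from checkpoint_utils.load_model_ensemble and a task dataloader (they are not defined here), and the models must be non-autoregressive ones that support length-beam decoding when beam_size > 1.

from fairseq.iterative_refinement_generator import IterativeRefinementGenerator

# models: list of NAT models, tgt_dict: target dictionary, sample: a batch
# with "net_input" -- all assumed to be loaded elsewhere.
generator = IterativeRefinementGenerator(
    tgt_dict,
    models=models,
    max_iter=10,      # refinement iterations
    beam_size=5,      # length beam
    adaptive=True,    # stop a hypothesis once refinement reaches a fixed point
)

hypos = generator.generate(models, sample)   # one list of hypotheses per sentence
best = hypos[0][0]                           # best length-beam candidate
print(tgt_dict.string(best["tokens"]), best["score"])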
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch logger = logging.getLogger(__name__) class NanDetector: """ Detects the first NaN or Inf in forward and/or backward pass and logs, together with the module name """ def __init__(self, model, forward=True, backward=True): self.bhooks = [] self.fhooks = [] self.forward = forward self.backward = backward self.named_parameters = list(model.named_parameters()) self.reset() for name, mod in model.named_modules(): mod.__module_name = name self.add_hooks(mod) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): # Dump out all model gnorms to enable better debugging norm = {} gradients = {} for name, param in self.named_parameters: if param.grad is not None: grad_norm = torch.norm(param.grad.data, p=2, dtype=torch.float32) norm[name] = grad_norm.item() if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any(): gradients[name] = param.grad.data if len(gradients) > 0: logger.info("Detected nan/inf grad norm, dumping norms...") logger.info(f"norms: {norm}") logger.info(f"gradients: {gradients}") self.close() def add_hooks(self, module): if self.forward: self.fhooks.append(module.register_forward_hook(self.fhook_fn)) if self.backward: self.bhooks.append(module.register_backward_hook(self.bhook_fn)) def reset(self): self.has_printed_f = False self.has_printed_b = False def _detect(self, tensor, name, backward): err = None if ( torch.is_floating_point(tensor) # single value tensors (like the loss) will not provide much info and tensor.numel() >= 2 ): with torch.no_grad(): if torch.isnan(tensor).any(): err = "NaN" elif torch.isinf(tensor).any(): err = "Inf" if err is not None: err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}" return err def _apply(self, module, inp, x, backward): if torch.is_tensor(x): if isinstance(inp, tuple) and len(inp) > 0: inp = inp[0] err = self._detect(x, module.__module_name, backward) if err is not None: if torch.is_tensor(inp) and not backward: err += ( f" input max: {inp.max().item()}, input min: {inp.min().item()}" ) has_printed_attr = "has_printed_b" if backward else "has_printed_f" logger.warning(err) setattr(self, has_printed_attr, True) elif isinstance(x, dict): for v in x.values(): self._apply(module, inp, v, backward) elif isinstance(x, list) or isinstance(x, tuple): for v in x: self._apply(module, inp, v, backward) def fhook_fn(self, module, inp, output): if not self.has_printed_f: self._apply(module, inp, output, backward=False) def bhook_fn(self, module, inp, output): if not self.has_printed_b: self._apply(module, inp, output, backward=True) def close(self): for hook in self.fhooks + self.bhooks: hook.remove()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/nan_detector.py
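A small, self-contained sketch of the intended usage of NanDetector: wrap a forward/backward pass so the first module emitting NaN/Inf is logged with its name. The toy model and input below are placeholders.

import torch
import torch.nn as nn

from fairseq.nan_detector import NanDetector

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 1))
x = torch.randn(4, 8)

# Hooks are attached to every submodule; if any output or gradient contains
# NaN/Inf, a warning naming the offending module is logged.
with NanDetector(model, forward=True, backward=True):
    loss = model(x).sum()
    loss.backward()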
# Originally from Microsoft Corporation. # Licensed under the MIT License. """ Wrapper for ngram_repeat_block cuda extension """ import torch from torch import nn import math from typing import Dict, List, Optional import warnings try: from fairseq import ngram_repeat_block_cuda EXTENSION_BUILT = True except ImportError: EXTENSION_BUILT = False def is_cuda_extension_usable() -> bool: """Check whether ngram_repeat_block_cuda is built properly""" if not EXTENSION_BUILT: return False bsz = 2 tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda") lprobs = torch.rand((8, 12), device="cuda") try: outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3) outputs = outputs + 4 # This line breaks if the extension is built incorrectly. return True except RuntimeError: warnings.warn( "NGramRepeatBlock extension must be rebuilt." 'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace' ) return False class NGramRepeatBlock(nn.Module): """ Wrapper class for calling ngram_repeat_block cuda extension """ def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True): super().__init__() self.use_extension = is_cuda_extension_usable() if use_extension else False self.no_repeat_ngram_size = no_repeat_ngram_size def reset_parameters(self): pass @torch.jit.unused def call_cuda_extension( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): return ngram_repeat_block_cuda.forward( tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size ) def forward( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): """ Args: tokens(Tensor): Input tokens(Bsz*beam, seq_len) lprobs(Tensor): likelihood probability, Expected to be updated in place.(Bsz*beam, vocab_size) bsz(int): batch size step(int): current step beam_size(int): beam size no_repeat_ngram_size(int): Ngram size """ msg = f"expected {bsz *beam_size} got" assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}" assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}" if self.use_extension: return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step) else: return self._no_repeat_ngram( tokens, lprobs, bsz, beam_size, step, ) def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int): """For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf""" gen_ngrams: List[Dict[str, List[int]]] = [ torch.jit.annotate(Dict[str, List[int]], {}) for bbsz_idx in range(bsz * beam_size) ] cpu_tokens = tokens.cpu() for bbsz_idx in range(bsz * beam_size): gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist() for ngram in self.transpose_list( [gen_tokens[i:] for i in range(self.no_repeat_ngram_size)] ): key = ",".join([str(x) for x in ngram[:-1]]) gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get( key, torch.jit.annotate(List[int], []) ) + [ngram[-1]] if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [ self.calculate_banned_tokens( tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx ) for bbsz_idx in range(bsz * beam_size) ] else: banned_tokens = [ torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size) ] for bbsz_idx in range(bsz * beam_size): lprobs[bbsz_idx][ torch.tensor(banned_tokens[bbsz_idx]).long() ] = torch.tensor(-math.inf).to(lprobs) return lprobs @staticmethod def calculate_banned_tokens( tokens, step: int, gen_ngrams: List[Dict[str, List[int]]], no_repeat_ngram_size: int, 
bbsz_idx: int, ): tokens_list: List[int] = tokens[ bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1 ].tolist() # before decoding the next token, prevent decoding of ngrams that have already appeared ngram_index = ",".join([str(x) for x in tokens_list]) return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], [])) @staticmethod def transpose_list(l: List[List[int]]): # GeneratorExp aren't supported in TS so ignoring the lint min_len = min([len(x) for x in l]) # noqa l2 = [[row[i] for row in l] for i in range(min_len)] return l2
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/ngram_repeat_block.py
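A sketch of the pure-PyTorch fallback path of NGramRepeatBlock (use_extension=False), with hand-picked shapes following the forward() docstring; the numbers are illustrative. Each hypothesis below has already produced one trigram, so the token that would repeat it is set to -inf.

import torch

from fairseq.ngram_repeat_block import NGramRepeatBlock

blocker = NGramRepeatBlock(no_repeat_ngram_size=3, use_extension=False)

bsz, beam_size, vocab = 1, 2, 10
step = 4  # five tokens generated so far
tokens = torch.tensor(
    [[5, 6, 7, 5, 6],     # "5 6" is repeating, so 7 must be banned
     [1, 2, 3, 1, 2]],    # "1 2" is repeating, so 3 must be banned
    dtype=torch.long,
)                          # shape: (bsz * beam_size, step + 1)
lprobs = torch.zeros(bsz * beam_size, vocab)

lprobs = blocker(tokens, lprobs, bsz, beam_size, step)
print(lprobs[0, 7], lprobs[1, 3])   # both -inf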
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace from typing import Union from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import populate_dataclass, merge_with_parent from hydra.core.config_store import ConfigStore from omegaconf import DictConfig REGISTRIES = {} def setup_registry(registry_name: str, base_class=None, default=None, required=False): assert registry_name.startswith("--") registry_name = registry_name[2:].replace("-", "_") REGISTRY = {} REGISTRY_CLASS_NAMES = set() DATACLASS_REGISTRY = {} # maintain a registry of all registries if registry_name in REGISTRIES: return # registry already exists REGISTRIES[registry_name] = { "registry": REGISTRY, "default": default, "dataclass_registry": DATACLASS_REGISTRY, } def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs): if isinstance(cfg, DictConfig): choice = cfg._name if choice and choice in DATACLASS_REGISTRY: dc = DATACLASS_REGISTRY[choice] cfg = merge_with_parent(dc(), cfg) elif isinstance(cfg, str): choice = cfg if choice in DATACLASS_REGISTRY: cfg = DATACLASS_REGISTRY[choice]() else: choice = getattr(cfg, registry_name, None) if choice in DATACLASS_REGISTRY: cfg = populate_dataclass(DATACLASS_REGISTRY[choice](), cfg) if choice is None: if required: raise ValueError("{} is required!".format(registry_name)) return None cls = REGISTRY[choice] if hasattr(cls, "build_" + registry_name): builder = getattr(cls, "build_" + registry_name) else: builder = cls return builder(cfg, *extra_args, **extra_kwargs) def register_x(name, dataclass=None): def register_x_cls(cls): if name in REGISTRY: raise ValueError( "Cannot register duplicate {} ({})".format(registry_name, name) ) if cls.__name__ in REGISTRY_CLASS_NAMES: raise ValueError( "Cannot register {} with duplicate class name ({})".format( registry_name, cls.__name__ ) ) if base_class is not None and not issubclass(cls, base_class): raise ValueError( "{} must extend {}".format(cls.__name__, base_class.__name__) ) if dataclass is not None and not issubclass(dataclass, FairseqDataclass): raise ValueError( "Dataclass {} must extend FairseqDataclass".format(dataclass) ) cls.__dataclass = dataclass if cls.__dataclass is not None: DATACLASS_REGISTRY[name] = cls.__dataclass cs = ConfigStore.instance() node = dataclass() node._name = name cs.store(name=name, group=registry_name, node=node, provider="fairseq") REGISTRY[name] = cls return cls return register_x_cls return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/registry.py
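A sketch of how setup_registry from the file above is meant to be used; the registry name --widget and the class are made up for illustration. register_x returns a decorator, and build_x resolves a choice (given here as a plain string) to the registered class and instantiates it.

from fairseq import registry

build_widget, register_widget, WIDGET_REGISTRY, _ = registry.setup_registry(
    "--widget", default="basic"
)

@register_widget("basic")
class BasicWidget:
    def __init__(self, cfg):
        self.cfg = cfg   # receives the choice string (or a config object)

widget = build_widget("basic")
print(type(widget).__name__, list(WIDGET_REGISTRY.keys()))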
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import os import sys try: from .version import __version__ # noqa except ImportError: version_txt = os.path.join(os.path.dirname(__file__), "version.txt") with open(version_txt) as f: __version__ = f.read().strip() __all__ = ["pdb"] # backwards compatibility to support `from fairseq.meters import AverageMeter` from fairseq.logging import meters, metrics, progress_bar # noqa sys.modules["fairseq.meters"] = meters sys.modules["fairseq.metrics"] = metrics sys.modules["fairseq.progress_bar"] = progress_bar # initialize hydra from fairseq.dataclass.initialize import hydra_init hydra_init() import fairseq.criterions # noqa import fairseq.models # noqa import fairseq.modules # noqa import fairseq.optim # noqa import fairseq.optim.lr_scheduler # noqa import fairseq.pdb # noqa import fairseq.scoring # noqa import fairseq.tasks # noqa import fairseq.token_generation_constraints # noqa import fairseq.benchmark # noqa import fairseq.model_parallel # noqa
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/__init__.py
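A short sketch of what the sys.modules aliasing in __init__.py buys: legacy import paths resolve to the same objects as the current fairseq.logging locations.

from fairseq.meters import AverageMeter                  # legacy path (alias)
from fairseq.logging.meters import AverageMeter as New   # current path

assert AverageMeter is New   # both names point at the same class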
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import search, utils from fairseq.data import data_utils from fairseq.models import FairseqIncrementalDecoder from torch import Tensor from fairseq.ngram_repeat_block import NGramRepeatBlock class SequenceGenerator(nn.Module): def __init__( self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None, symbols_to_strip_from_output=None, lm_model=None, lm_weight=1.0, ): """Generates translations of a given source sentence. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models, currently support fairseq.models.TransformerModel for scripting beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) """ super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.tgt_dict = tgt_dict self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() if eos is None else eos self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size) assert temperature > 0, "--temperature must be greater than 0" self.search = ( search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy ) # We only need to set src_lengths in LengthConstrainedBeamSearch. # As a module attribute, setting it would break in multithread # settings when the model is shared. 
self.should_set_src_lengths = ( hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths ) self.model.eval() self.lm_model = lm_model self.lm_weight = lm_weight if self.lm_model is not None: self.lm_model.eval() def cuda(self): self.model.cuda() return self @torch.no_grad() def forward( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, bos_token: Optional[int] = None, ): """Generate a batch of translations. Args: sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, prefix_tokens, bos_token=bos_token) # TODO(myleott): unused, deprecate after pytorch-translate migration def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): """Iterate over a batched dataset and yield individual translations. Args: cuda (bool, optional): use GPU for generation timer (StopwatchMeter, optional): time generations """ for sample in data_itr: s = utils.move_to_cuda(sample) if cuda else sample if "net_input" not in s: continue input = s["net_input"] # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in input.items() if k != "prev_output_tokens" } if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if timer is not None: timer.stop(sum(len(h[0]["tokens"]) for h in hypos)) for i, id in enumerate(s["id"].data): # remove padding src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad) ref = ( utils.strip_pad(s["target"].data[i, :], self.pad) if s["target"] is not None else None ) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs): """Generate translations. Match the api of other fairseq generators. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens constraints (torch.LongTensor, optional): force decoder to include the list of constraints bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, **kwargs) def _generate( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, constraints: Optional[Tensor] = None, bos_token: Optional[int] = None, ): incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(self.model.models_size) ], ) net_input = sample["net_input"] if "src_tokens" in net_input: src_tokens = net_input["src_tokens"] # length of the source text being the character length except EndOfSentence and pad src_lengths = ( (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) ) elif "source" in net_input: src_tokens = net_input["source"] src_lengths = ( net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1) if net_input["padding_mask"] is not None else torch.tensor(src_tokens.size(-1)).to(src_tokens) ) else: raise Exception("expected src_tokens or source in net input") # bsz: total number of sentences in beam # Note that src_tokens may have more than 2 dimenions (i.e. 
audio features) bsz, src_len = src_tokens.size()[:2] beam_size = self.beam_size if constraints is not None and not self.search.supports_constraints: raise NotImplementedError( "Target-side constraints were provided, but search method doesn't support them" ) # Initialize constraints, when active self.search.init_constraints(constraints, beam_size) max_len: int = -1 if self.match_source_len: max_len = src_lengths.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), # exclude the EOS marker self.model.max_decoder_positions() - 1, ) assert ( self.min_len <= max_len ), "min_len cannot be larger than max_len, please adjust these!" # compute the encoder output for each beam encoder_outs = self.model.forward_encoder(net_input) # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order) # ensure encoder_outs is a List. assert encoder_outs is not None # initialize buffers scores = ( torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float() ) # +1 for eos; pad is never chosen for scoring tokens = ( torch.zeros(bsz * beam_size, max_len + 2) .to(src_tokens) .long() .fill_(self.pad) ) # +2 for eos and pad tokens[:, 0] = self.eos if bos_token is None else bos_token attn: Optional[Tensor] = None # A list that indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = ( torch.zeros(bsz, beam_size).to(src_tokens).eq(-1) ) # forward and backward-compatible False mask # list of completed sentences finalized = torch.jit.annotate( List[List[Dict[str, Tensor]]], [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)], ) # contains lists of dictionaries of infomation about the hypothesis being finalized at each step finished = [ False for i in range(bsz) ] # a boolean array indicating if the sentence at the index is finished or not num_remaining_sent = bsz # number of sentences remaining # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = ( (torch.arange(0, bsz) * beam_size) .unsqueeze(1) .type_as(tokens) .to(src_tokens.device) ) cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device) reorder_state: Optional[Tensor] = None batch_idxs: Optional[Tensor] = None original_batch_idxs: Optional[Tensor] = None if "id" in sample and isinstance(sample["id"], Tensor): original_batch_idxs = sample["id"] else: original_batch_idxs = torch.arange(0, bsz).type_as(tokens) for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as( batch_idxs ) reorder_state.view(-1, beam_size).add_( corr.unsqueeze(-1) * beam_size ) original_batch_idxs = original_batch_idxs[batch_idxs] self.model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = self.model.reorder_encoder_out( encoder_outs, reorder_state ) lprobs, avg_attn_scores = self.model.forward_decoder( 
tokens[:, : step + 1], encoder_outs, incremental_states, self.temperature, ) if self.lm_model is not None: lm_out = self.lm_model(tokens[:, : step + 1]) probs = self.lm_model.get_normalized_probs( lm_out, log_probs=True, sample=None ) probs = probs[:, -1, :] * self.lm_weight lprobs += probs lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs) lprobs[:, self.pad] = -math.inf # never select pad lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty # handle max length constraint if step >= max_len: lprobs[:, : self.eos] = -math.inf lprobs[:, self.eos + 1 :] = -math.inf # handle prefix tokens (possibly with different lengths) if ( prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len ): lprobs, tokens, scores = self._prefix_tokens( step, lprobs, scores, tokens, prefix_tokens, beam_size ) elif step < self.min_len: # minimum length constraint (does not apply if using prefix_tokens) lprobs[:, self.eos] = -math.inf # Record attention scores, only support avg_attn_scores is a Tensor if avg_attn_scores is not None: if attn is None: attn = torch.empty( bsz * beam_size, avg_attn_scores.size(1), max_len + 2 ).to(scores) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(lprobs) eos_bbsz_idx = torch.empty(0).to( tokens ) # indices of hypothesis ending with eos (finished sentences) eos_scores = torch.empty(0).to( scores ) # scores of hypothesis ending with eos (finished sentences) if self.should_set_src_lengths: self.search.set_src_lengths(src_lengths) if self.no_repeat_ngram_size > 0: lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step) # Shape: (batch, cand_size) cand_scores, cand_indices, cand_beams = self.search.step( step, lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], tokens[:, : step + 1], original_batch_idxs, ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos # Shape of eos_mask: (batch size, beam size) eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf) eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask) # only consider eos when it's among the top beam_size indices # Now we know what beam item(s) to finish # Shape: 1d list of absolute-numbered eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents: List[int] = [] if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = self.finalize_hypos( step, eos_bbsz_idx, eos_scores, tokens, scores, finalized, finished, beam_size, attn, src_lengths, max_len, ) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if self.search.stop_on_max_len and step >= max_len: break assert step < max_len, f"{step} < {max_len}" # Remove finalized sentences (ones for which {beam_size} # finished hypotheses have been generated) from the batch. 
if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = torch.ones( bsz, dtype=torch.bool, device=cand_indices.device ) batch_mask[finalized_sents] = False # TODO replace `nonzero(as_tuple=False)` after TorchScript supports it batch_idxs = torch.arange( bsz, device=cand_indices.device ).masked_select(batch_mask) # Choose the subset of the hypothesized constraints that will continue self.search.prune_sentences(batch_idxs) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) cand_scores = cand_scores[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths = src_lengths[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view( new_bsz * beam_size, attn.size(1), -1 ) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos hypos # and values < cand_size indicate candidate active hypos. # After, the min values per row are the top candidate active hypos # Rewrite the operator since the element wise or is not supported in torchscript. eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size])) active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just # the hypos with the smallest values in active_mask. # {active_hypos} indicates which {beam_size} hypotheses # from the list of {2 * beam_size} candidates were # selected. Shapes: (batch size, beam size) new_cands_to_ignore, active_hypos = torch.topk( active_mask, k=beam_size, dim=1, largest=False ) # update cands_to_ignore to ignore any finalized hypos. cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] # Make sure there is at least one active item for each sentence in the batch. assert (~cands_to_ignore).any(dim=1).all() # update cands_to_ignore to ignore any finalized hypos # {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam # can be selected more than once). 
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos) active_scores = torch.gather(cand_scores, dim=1, index=active_hypos) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses # Set the tokens for each beam (can select the same row more than once) tokens[:, : step + 1] = torch.index_select( tokens[:, : step + 1], dim=0, index=active_bbsz_idx ) # Select the next token for each of them tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather( cand_indices, dim=1, index=active_hypos ) if step > 0: scores[:, :step] = torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx ) scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather( cand_scores, dim=1, index=active_hypos ) # Update constraints based on which candidates were selected for the next beam self.search.update_constraints(active_hypos) # copy attention for active hypotheses if attn is not None: attn[:, :, : step + 2] = torch.index_select( attn[:, :, : step + 2], dim=0, index=active_bbsz_idx ) # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): scores = torch.tensor( [float(elem["score"].item()) for elem in finalized[sent]] ) _, sorted_scores_indices = torch.sort(scores, descending=True) finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices] finalized[sent] = torch.jit.annotate( List[Dict[str, Tensor]], finalized[sent] ) return finalized def _prefix_tokens( self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int ): """Handle prefix tokens""" prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1)) prefix_mask = prefix_toks.ne(self.pad) lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs) lprobs[prefix_mask] = lprobs[prefix_mask].scatter( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask] ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[ :, 0, 1 : step + 1 ] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() # copy tokens, scores and lprobs from the first beam to all beams tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size) scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size) lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size) return lprobs, tokens, scores def replicate_first_beam(self, tensor, mask, beam_size: int): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) def finalize_hypos( self, step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[str, Tensor]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int, ): """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly. A sentence is finalized when {beam_size} finished items have been collected for it. Returns number of sentences (not beam items) being finalized. These will be removed from the batch and not processed further. 
Args: bbsz_idx (Tensor): """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors. # tokens is (batch * beam, max_len). So the index_select # gets the newly EOS rows, then selects cols 1..{step + 2} tokens_clone = tokens.index_select(0, bbsz_idx)[ :, 1 : step + 2 ] # skip the first index, which is EOS tokens_clone[:, step] = self.eos attn_clone = ( attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2] if attn is not None else None ) # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: eos_scores /= (step + 1) ** self.len_penalty # cum_unfin records which sentences in the batch are finished. # It helps match indexing between (a) the original sentences # in the batch and (b) the current, possibly-reduced set of # sentences. cum_unfin: List[int] = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) # The keys here are of the form "{sent}_{unfin_idx}", where # "unfin_idx" is the index in the current (possibly reduced) # list of sentences, and "sent" is the index in the original, # unreduced batch # set() is not supported in script export sents_seen: Dict[str, Optional[Tensor]] = {} # For every finished beam item for i in range(bbsz_idx.size()[0]): idx = bbsz_idx[i] score = eos_scores[i] # sentence index in the current (possibly reduced) batch unfin_idx = idx // beam_size # sentence index in the original (unreduced) batch sent = unfin_idx + cum_unfin[unfin_idx] # Cannot create dict for key type '(int, int)' in torchscript. # The workaround is to cast int to string seen = str(sent.item()) + "_" + str(unfin_idx.item()) if seen not in sents_seen: sents_seen[seen] = None if self.match_source_len and step > src_lengths[unfin_idx]: score = torch.tensor(-math.inf).to(score) # An input sentence (among those in a batch) is finished when # beam_size hypotheses have been collected for it if len(finalized[sent]) < beam_size: if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i] else: hypo_attn = torch.empty(0) finalized[sent].append( { "tokens": tokens_clone[i], "score": score, "attention": hypo_attn, # src_len x tgt_len "alignment": torch.empty(0), "positional_scores": pos_scores[i], } ) newly_finished: List[int] = [] for seen in sents_seen.keys(): # check termination conditions for this sentence sent: int = int(float(seen.split("_")[0])) unfin_idx: int = int(float(seen.split("_")[1])) if not finished[sent] and self.is_finished( step, unfin_idx, max_len, len(finalized[sent]), beam_size ): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished def is_finished( self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int, ): """ Check whether decoding for a sentence is finished, which occurs when the list of finalized sentences has reached the beam size, or when we reach the maximum length. 
""" assert finalized_sent_len <= beam_size if finalized_sent_len == beam_size or step == max_len: return True return False class EnsembleModel(nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models_size = len(models) # method '__len__' is not supported in ModuleList for torch script self.single_model = models[0] self.models = nn.ModuleList(models) self.has_incremental: bool = False if all( hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder) for m in models ): self.has_incremental = True def forward(self): pass def has_encoder(self): return hasattr(self.single_model, "encoder") def has_incremental_states(self): return self.has_incremental def max_decoder_positions(self): return min([m.max_decoder_positions() for m in self.models]) @torch.jit.export def forward_encoder(self, net_input: Dict[str, Tensor]): if not self.has_encoder(): return None return [model.encoder.forward_torchscript(net_input) for model in self.models] @torch.jit.export def forward_decoder( self, tokens, encoder_outs: List[Dict[str, List[Tensor]]], incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], temperature: float = 1.0, ): log_probs = [] avg_attn: Optional[Tensor] = None encoder_out: Optional[Dict[str, List[Tensor]]] = None for i, model in enumerate(self.models): if self.has_encoder(): encoder_out = encoder_outs[i] # decode each model if self.has_incremental_states(): decoder_out = model.decoder.forward( tokens, encoder_out=encoder_out, incremental_state=incremental_states[i], ) else: decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out) attn: Optional[Tensor] = None decoder_len = len(decoder_out) if decoder_len > 1 and decoder_out[1] is not None: if isinstance(decoder_out[1], Tensor): attn = decoder_out[1] else: attn_holder = decoder_out[1]["attn"] if isinstance(attn_holder, Tensor): attn = attn_holder elif attn_holder is not None: attn = attn_holder[0] if attn is not None: attn = attn[:, -1, :] decoder_out_tuple = ( decoder_out[0][:, -1:, :].div_(temperature), None if decoder_len <= 1 else decoder_out[1], ) probs = model.get_normalized_probs( decoder_out_tuple, log_probs=True, sample=None ) probs = probs[:, -1, :] if self.models_size == 1: return probs, attn log_probs.append(probs) if attn is not None: if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log( self.models_size ) if avg_attn is not None: avg_attn.div_(self.models_size) return avg_probs, avg_attn @torch.jit.export def reorder_encoder_out( self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order ): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_outs: List[Dict[str, List[Tensor]]] = [] if not self.has_encoder(): return new_outs for i, model in enumerate(self.models): assert encoder_outs is not None new_outs.append( model.encoder.reorder_encoder_out(encoder_outs[i], new_order) ) return new_outs @torch.jit.export def reorder_incremental_state( self, incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): if not self.has_incremental_states(): return for i, model in enumerate(self.models): model.decoder.reorder_incremental_state_scripting( incremental_states[i], new_order ) class SequenceGeneratorWithAlignment(SequenceGenerator): def __init__( self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs ): """Generates translations of a given source sentence. Produces alignments following "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: left_pad_target (bool, optional): Whether or not the hypothesis should be left padded or not when they are teacher forced for generating alignments. """ super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs) self.left_pad_target = left_pad_target if print_alignment == "hard": self.extract_alignment = utils.extract_hard_alignment elif print_alignment == "soft": self.extract_alignment = utils.extract_soft_alignment @torch.no_grad() def generate(self, models, sample, **kwargs): finalized = super()._generate(sample, **kwargs) src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] beam_size = self.beam_size ( src_tokens, src_lengths, prev_output_tokens, tgt_tokens, ) = self._prepare_batch_for_alignment(sample, finalized) if any(getattr(m, "full_context_alignment", False) for m in self.model.models): attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens) else: attn = [ finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0) for i in range(bsz * beam_size) ] if src_tokens.device != "cpu": src_tokens = src_tokens.to("cpu") tgt_tokens = tgt_tokens.to("cpu") attn = [i.to("cpu") for i in attn] # Process the attn matrix to extract hard alignments. 
for i in range(bsz * beam_size): alignment = self.extract_alignment( attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos ) finalized[i // beam_size][i % beam_size]["alignment"] = alignment return finalized def _prepare_batch_for_alignment(self, sample, hypothesis): src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] src_tokens = ( src_tokens[:, None, :] .expand(-1, self.beam_size, -1) .contiguous() .view(bsz * self.beam_size, -1) ) src_lengths = sample["net_input"]["src_lengths"] src_lengths = ( src_lengths[:, None] .expand(-1, self.beam_size) .contiguous() .view(bsz * self.beam_size) ) prev_output_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True, ) tgt_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False, ) return src_tokens, src_lengths, prev_output_tokens, tgt_tokens class EnsembleModelWithAlignment(EnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) def forward_align(self, src_tokens, src_lengths, prev_output_tokens): avg_attn = None for model in self.models: decoder_out = model(src_tokens, src_lengths, prev_output_tokens) attn = decoder_out[1]["attn"][0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(self.models) > 1: avg_attn.div_(len(self.models)) return avg_attn
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/sequence_generator.py
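A hedged sketch of constructing SequenceGenerator directly; in practice task.build_generator(...) performs this wiring (as hub_utils.py later in this dump shows). `models`, `tgt_dict`, and `sample` are assumed to be prepared elsewhere, e.g. via checkpoint_utils.load_model_ensemble and a task dataloader.

from fairseq.sequence_generator import SequenceGenerator

# models: list of trained translation models, tgt_dict: target dictionary,
# sample: a batch with "net_input" -- assumed to be loaded elsewhere.
generator = SequenceGenerator(
    models,
    tgt_dict,
    beam_size=5,
    max_len_a=0,
    max_len_b=200,
    normalize_scores=True,
    no_repeat_ngram_size=3,
)

hypos = generator.generate(models, sample)   # List[List[Dict[str, Tensor]]]
best = hypos[0][0]
print(tgt_dict.string(best["tokens"]), best["score"].item())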
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import multiprocessing import os import pdb import sys __all__ = ["set_trace"] _stdin = [None] _stdin_lock = multiprocessing.Lock() try: _stdin_fd = sys.stdin.fileno() except Exception: _stdin_fd = None class MultiprocessingPdb(pdb.Pdb): """A Pdb wrapper that works in a multiprocessing environment. Usage: `from fairseq import pdb; pdb.set_trace()` """ def __init__(self): pdb.Pdb.__init__(self, nosigint=True) def _cmdloop(self): stdin_bak = sys.stdin with _stdin_lock: try: if _stdin_fd is not None: if not _stdin[0]: _stdin[0] = os.fdopen(_stdin_fd) sys.stdin = _stdin[0] self.cmdloop() finally: sys.stdin = stdin_bak def set_trace(): pdb = MultiprocessingPdb() pdb.set_trace(sys._getframe().f_back)
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/pdb.py
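Usage follows the docstring above; a sketch of dropping into the multiprocessing-safe debugger from worker code.

from fairseq import pdb

def training_step(batch):
    # Breakpoint that keeps working when several processes share stdin.
    pdb.set_trace()
    ...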
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re SPACE_NORMALIZER = re.compile(r"\s+") def tokenize_line(line): line = SPACE_NORMALIZER.sub(" ", line) line = line.strip() return line.split()
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/tokenizer.py
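A quick check of the behaviour of tokenize_line: runs of whitespace are collapsed and the line is split on single spaces.

from fairseq.tokenizer import tokenize_line

assert tokenize_line("  hello\tworld \n") == ["hello", "world"]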
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import logging import os from typing import Any, Dict, Iterator, List import torch from fairseq import utils from fairseq.data import encoders from omegaconf import open_dict from torch import nn logger = logging.getLogger(__name__) def from_pretrained( model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", archive_map=None, **kwargs ): from fairseq import checkpoint_utils, file_utils if archive_map is not None: if model_name_or_path in archive_map: model_name_or_path = archive_map[model_name_or_path] if data_name_or_path is not None and data_name_or_path in archive_map: data_name_or_path = archive_map[data_name_or_path] # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe) # for each model if isinstance(model_name_or_path, dict): for k, v in model_name_or_path.items(): if k == "checkpoint_file": checkpoint_file = v elif ( k != "path" # only set kwargs that don't already have overrides and k not in kwargs ): kwargs[k] = v model_name_or_path = model_name_or_path["path"] model_path = file_utils.load_archive_file(model_name_or_path) # convenience hack for loading data and BPE codes from model archive if data_name_or_path.startswith("."): kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path)) else: kwargs["data"] = file_utils.load_archive_file(data_name_or_path) for file, arg in { "code": "bpe_codes", "bpecodes": "bpe_codes", "sentencepiece.bpe.model": "sentencepiece_model", "merges.txt": "bpe_merges", "vocab.json": "bpe_vocab", }.items(): path = os.path.join(model_path, file) if os.path.exists(path): kwargs[arg] = path if "user_dir" in kwargs: utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"])) models, args, task = checkpoint_utils.load_model_ensemble_and_task( [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)], arg_overrides=kwargs, ) return { "args": args, "task": task, "models": models, } class GeneratorHubInterface(nn.Module): """ PyTorch Hub interface for generating sequences from a pre-trained translation or language model. 
""" def __init__(self, cfg, task, models): super().__init__() self.cfg = cfg self.task = task self.models = nn.ModuleList(models) self.src_dict = task.source_dictionary self.tgt_dict = task.target_dictionary # optimize model for generation for model in self.models: model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) self.align_dict = utils.load_align_dict(cfg.generation.replace_unk) self.tokenizer = encoders.build_tokenizer(cfg.tokenizer) self.bpe = encoders.build_bpe(cfg.bpe) self.max_positions = utils.resolve_max_positions( self.task.max_positions(), *[model.max_positions() for model in models] ) # this is useful for determining the device self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def translate( self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs ) -> List[str]: return self.sample(sentences, beam, verbose, **kwargs) def sample( self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs ) -> List[str]: if isinstance(sentences, str): return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0] tokenized_sentences = [self.encode(sentence) for sentence in sentences] batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs) return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos] def score(self, sentences: List[str], **kwargs): if isinstance(sentences, str): return self.score([sentences], **kwargs)[0] # NOTE: this doesn't support translation tasks currently tokenized_sentences = [self.encode(sentence) for sentence in sentences] return [ hypos[0] for hypos in self.generate( tokenized_sentences, score_reference=True, **kwargs ) ] def generate( self, tokenized_sentences: List[torch.LongTensor], beam: int = 5, verbose: bool = False, skip_invalid_size_inputs=False, inference_step_args=None, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1: return self.generate( tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs )[0] # build generator using current args as well as any kwargs gen_args = copy.deepcopy(self.cfg.generation) with open_dict(gen_args): gen_args.beam = beam for k, v in kwargs.items(): setattr(gen_args, k, v) generator = self.task.build_generator(self.models, gen_args) inference_step_args = inference_step_args or {} results = [] for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs): batch = utils.apply_to_sample(lambda t: t.to(self.device), batch) translations = self.task.inference_step( generator, self.models, batch, **inference_step_args ) for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) # sort output to match input order outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] if verbose: def getarg(name, default): return getattr(gen_args, name, getattr(self.cfg, name, default)) for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs): src_str_with_unk = self.string(source_tokens) logger.info("S\t{}".format(src_str_with_unk)) for hypo in target_hypotheses: hypo_str = self.decode(hypo["tokens"]) logger.info("H\t{}\t{}".format(hypo["score"], hypo_str)) logger.info( "P\t{}".format( " ".join( map( lambda x: "{:.4f}".format(x), hypo["positional_scores"].tolist(), ) ) ) ) if hypo["alignment"] is not None and getarg( 
"print_alignment", False ): logger.info( "A\t{}".format( " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo["alignment"] ] ) ) ) return outputs def encode(self, sentence: str) -> torch.LongTensor: sentence = self.tokenize(sentence) sentence = self.apply_bpe(sentence) return self.binarize(sentence) def decode(self, tokens: torch.LongTensor) -> str: sentence = self.string(tokens) sentence = self.remove_bpe(sentence) return self.detokenize(sentence) def tokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.encode(sentence) return sentence def detokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.decode(sentence) return sentence def apply_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.encode(sentence) return sentence def remove_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.decode(sentence) return sentence def binarize(self, sentence: str) -> torch.LongTensor: return self.src_dict.encode_line(sentence, add_if_not_exist=False).long() def string(self, tokens: torch.LongTensor) -> str: return self.tgt_dict.string(tokens) def _build_batches( self, tokens: List[List[int]], skip_invalid_size_inputs: bool ) -> Iterator[Dict[str, Any]]: lengths = torch.LongTensor([t.numel() for t in tokens]) batch_iterator = self.task.get_batch_iterator( dataset=self.task.build_dataset_for_inference(tokens, lengths), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=self.max_positions, ignore_invalid_inputs=skip_invalid_size_inputs, disable_iterator_cache=True, ).next_epoch_itr(shuffle=False) return batch_iterator class BPEHubInterface(object): """PyTorch Hub interface for Byte-Pair Encoding (BPE).""" def __init__(self, bpe, **kwargs): super().__init__() args = argparse.Namespace(bpe=bpe, **kwargs) self.bpe = encoders.build_bpe(args) assert self.bpe is not None def encode(self, sentence: str) -> str: return self.bpe.encode(sentence) def decode(self, sentence: str) -> str: return self.bpe.decode(sentence) class TokenizerHubInterface(object): """PyTorch Hub interface for tokenization.""" def __init__(self, tokenizer, **kwargs): super().__init__() args = argparse.Namespace(tokenizer=tokenizer, **kwargs) self.tokenizer = encoders.build_tokenizer(args) assert self.tokenizer is not None def encode(self, sentence: str) -> str: return self.tokenizer.encode(sentence) def decode(self, sentence: str) -> str: return self.tokenizer.decode(sentence)
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/hub_utils.py
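A minimal usage sketch for the hub utilities above, assuming fairseq is installed and that the checkpoint directory exists locally; the path and file names are placeholders, not values taken from the original file.

# Hypothetical local checkpoint directory; from_pretrained also accepts names from an archive_map.
from fairseq.hub_utils import from_pretrained, GeneratorHubInterface

x = from_pretrained(
    "/path/to/model_dir",       # placeholder: contains model.pt, dict.*.txt and (optionally) BPE files
    checkpoint_file="model.pt",
    data_name_or_path=".",      # "." resolves the data directory relative to the model archive
)

hub = GeneratorHubInterface(x["args"], x["task"], x["models"])
hub.eval()                      # it is an nn.Module, so eval()/cuda() work as usual

# translate() is a thin wrapper around sample()/generate()
print(hub.translate("Hello world!", beam=5))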
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import torch from fairseq import utils class SequenceScorer(object): """Scores the target for a given source sentence.""" def __init__( self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None, symbols_to_strip_from_output=None, ): self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() if eos is None else eos self.softmax_batch = softmax_batch or sys.maxsize assert self.softmax_batch > 0 self.compute_alignment = compute_alignment self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) @torch.no_grad() def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample["net_input"] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample["target"] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model(**net_input) attn = decoder_out[1] if len(decoder_out) > 1 else None if type(attn) is dict: attn = attn.get("attn", None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample["target"] = tgt curr_prob = model.get_normalized_probs( bd, log_probs=len(models) == 1, sample=sample ).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs( curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt ) probs[idx:end] = tgt_probs.view(-1) idx = end sample["target"] = orig_target probs = probs.view(sample["target"].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None: if torch.is_tensor(attn): attn = attn.data else: attn = attn[0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = ( utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad) if sample["target"] is not None else None ) tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] if self.compute_alignment: alignment = utils.extract_hard_alignment( avg_attn_i, sample["net_input"]["src_tokens"][i], sample["target"][i], self.pad, self.eos, ) else: alignment = None else: avg_attn_i = alignment = None hypos.append( [ { "tokens": ref, "score": score_i, "attention": avg_attn_i, "alignment": alignment, "positional_scores": 
avg_probs_i, } ] ) return hypos
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/sequence_scorer.py
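In the ensemble path above, SequenceScorer averages per-token target probabilities across models, takes the log, and length-normalizes the summed log-probability. A standalone sketch of that arithmetic with made-up numbers (pure torch; it does not call SequenceScorer itself):

import torch

# Fake per-token target probabilities from two "models" for one sentence of length 4.
probs_model_a = torch.tensor([0.50, 0.40, 0.90, 0.70])
probs_model_b = torch.tensor([0.60, 0.30, 0.80, 0.75])

# Ensemble the way SequenceScorer does for >1 models: average probabilities, then take the log.
avg_probs = (probs_model_a + probs_model_b) / 2
positional_scores = avg_probs.log()          # log-probability per target token

# The hypothesis score is the length-normalized sum of positional scores.
score = positional_scores.sum() / positional_scores.numel()
print(positional_scores, score)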
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import uuid from typing import Dict, Optional from torch import Tensor class FairseqIncrementalState(object): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.init_incremental_state() def init_incremental_state(self): self._incremental_state_id = str(uuid.uuid4()) def _get_full_incremental_state_key(self, key: str) -> str: return "{}.{}".format(self._incremental_state_id, key) def get_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" full_key = self._get_full_incremental_state_key(key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = self._get_full_incremental_state_key(key) incremental_state[full_key] = value return incremental_state def with_incremental_state(cls): cls.__bases__ = (FairseqIncrementalState,) + tuple( b for b in cls.__bases__ if b != FairseqIncrementalState ) return cls
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/incremental_decoding_utils.py
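A small runnable sketch of the with_incremental_state pattern above, assuming fairseq is importable; CachingLayer and the "prev_state" key are illustrative names, not part of fairseq.

import torch
from torch import nn
from fairseq.incremental_decoding_utils import with_incremental_state


@with_incremental_state
class CachingLayer(nn.Module):
    """Toy module that caches its previous output under a per-instance key."""

    def forward(self, x, incremental_state=None):
        saved = self.get_incremental_state(incremental_state, "prev_state")
        prev = saved["out"] if saved is not None else torch.zeros_like(x)
        out = x + prev
        if incremental_state is not None:
            self.set_incremental_state(incremental_state, "prev_state", {"out": out})
        return out


layer = CachingLayer()
state = {}  # shared incremental_state dict; keys are namespaced per module instance
print(layer(torch.ones(2), incremental_state=state))   # tensor([1., 1.])
print(layer(torch.ones(2), incremental_state=state))   # tensor([2., 2.])  (uses the cached output)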
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.data import iterators from fairseq.file_io import PathManager from fairseq.logging.meters import safe_round from fairseq.modules import gelu, gelu_accurate from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor try: from amp_C import multi_tensor_l2norm multi_tensor_l2norm_available = True except ImportError: multi_tensor_l2norm_available = False try: import torch_xla.core.xla_model as xm except ImportError: xm = None logger = logging.getLogger(__name__) MANIFOLD_PATH_SEP = "|" class FileContentsAction(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") super(FileContentsAction, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): if PathManager.isfile(values): with PathManager.open(values) as f: argument = f.read().strip() else: argument = values setattr(namespace, self.dest, argument) def split_paths(paths: str) -> List[str]: return ( paths.split(os.pathsep) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP) ) def load_ensemble_for_inference(filenames, task, model_arg_overrides=None): from fairseq import checkpoint_utils deprecation_warning( "utils.load_ensemble_for_inference is deprecated. " "Please use checkpoint_utils.load_model_ensemble instead." ) return checkpoint_utils.load_model_ensemble( filenames, arg_overrides=model_arg_overrides, task=task ) def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cuda(sample, device=None): device = device or torch.cuda.current_device() def _move_to_cuda(tensor): # non_blocking is ignored if tensor is not pinned, so we can always set # to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620) return tensor.to(device=device, non_blocking=True) return apply_to_sample(_move_to_cuda, sample) def move_to_cpu(sample): def _move_to_cpu(tensor): # PyTorch has poor support for half tensors (float16) on CPU. # Move any such tensors to float32. 
if tensor.dtype in {torch.bfloat16, torch.float16}: tensor = tensor.to(dtype=torch.float32) return tensor.cpu() return apply_to_sample(_move_to_cpu, sample) def get_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" return module.get_incremental_state(incremental_state, key) def set_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: result = module.set_incremental_state(incremental_state, key, value) if result is not None: incremental_state = result return incremental_state def load_align_dict(replace_unk): if replace_unk is None: align_dict = None elif isinstance(replace_unk, str) and len(replace_unk) > 0: # Load alignment dictionary for unknown word replacement if it was passed as an argument. align_dict = {} with open(replace_unk, "r") as f: for line in f: cols = line.split() align_dict[cols[0]] = cols[1] else: # No alignment dictionary provided but we still want to perform unknown word replacement by copying the # original source word. align_dict = {} return align_dict def print_embed_overlap(embed_dict, vocab_dict): embed_keys = set(embed_dict.keys()) vocab_keys = set(vocab_dict.symbols) overlap = len(embed_keys & vocab_keys) logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict))) def parse_embedding(embed_path): """Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 """ embed_dict = {} with open(embed_path) as f_embed: next(f_embed) # skip header for line in f_embed: pieces = line.rstrip().split(" ") embed_dict[pieces[0]] = torch.Tensor( [float(weight) for weight in pieces[1:]] ) return embed_dict def load_embedding(embed_dict, vocab, embedding): for idx in range(len(vocab)): token = vocab[idx] if token in embed_dict: embedding.weight.data[idx] = embed_dict[token] return embedding def replace_unk(hypo_str, src_str, alignment, align_dict, unk): from fairseq import tokenizer # Tokens are strings here hypo_tokens = tokenizer.tokenize_line(hypo_str) # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"] for i, ht in enumerate(hypo_tokens): if ht == unk: src_token = src_tokens[alignment[i]] # Either take the corresponding value in the aligned dictionary or just copy the original value. 
hypo_tokens[i] = align_dict.get(src_token, src_token) return " ".join(hypo_tokens) def post_process_prediction( hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None, ): hypo_str = tgt_dict.string( hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore ) if align_dict is not None: hypo_str = replace_unk( hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string() ) if align_dict is not None or remove_bpe is not None: # Convert back to tokens for evaluating with unk replacement or without BPE # Note that the dictionary can be modified inside the method. hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True) return hypo_tokens, hypo_str, alignment def make_positions(tensor, padding_idx: int, onnx_trace: bool = False): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def strip_pad(tensor, pad): return tensor[tensor.ne(pad)] def buffered_arange(max): if not hasattr(buffered_arange, "buf"): buffered_arange.buf = torch.LongTensor() if max > buffered_arange.buf.numel(): buffered_arange.buf.resize_(max) torch.arange(max, out=buffered_arange.buf) return buffered_arange.buf[:max] def convert_padding_direction( src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False ): assert right_to_left ^ left_to_right pad_mask = src_tokens.eq(padding_idx) if not pad_mask.any(): # no padding, return early return src_tokens if left_to_right and not pad_mask[:, 0].any(): # already right padded return src_tokens if right_to_left and not pad_mask[:, -1].any(): # already left padded return src_tokens max_len = src_tokens.size(1) buffered = torch.empty(0).long() if max_len > 0: torch.arange(max_len, out=buffered) range = buffered.type_as(src_tokens).expand_as(src_tokens) num_pads = pad_mask.long().sum(dim=1, keepdim=True) if right_to_left: index = torch.remainder(range - num_pads, max_len) else: index = torch.remainder(range + num_pads, max_len) return src_tokens.gather(1, index) def item(tensor): if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor: per_device_grads = {} norms = [] for grad in grads: device = grad.device cur_device_grads = per_device_grads.get(device) if cur_device_grads is None: cur_device_grads = [] per_device_grads[device] = cur_device_grads cur_device_grads.append(grad) for device in per_device_grads.keys(): cur_device_grads = per_device_grads[device] if device.type == "cuda": # TODO(msb) return has_inf has_inf = torch.zeros((1, 1), dtype=torch.int, device=device) with torch.cuda.device(device): norm = multi_tensor_l2norm( chunk_size, has_inf, [cur_device_grads], False ) norms.append(norm[0].to(torch.cuda.current_device())) else: norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads] total_norm = torch.norm(torch.stack(norms)) return total_norm @torch.no_grad() def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor: if isinstance(params, torch.Tensor): params = [params] params = 
list(params) grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)] if len(grads) == 0: if len(params) > 0: return params[0].new_tensor(0.0) else: return torch.tensor(0.0) if len(grads) == 1: total_norm = torch.norm(grads[0], p=2, dtype=torch.float32) else: if multi_tensor_l2norm_available: total_norm = multi_tensor_total_norm(grads) else: if torch.cuda.is_available(): warnings.warn( "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; " "you may get better performance by installing NVIDIA's apex library" ) device = torch.cuda.current_device() elif grads[0].device.type == "xla": device = grads[0].device else: device = torch.device("cpu") total_norm = torch.norm( torch.stack( [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads] ) ) if aggregate_norm_fn is not None: total_norm = aggregate_norm_fn(total_norm) if max_norm > 0: max_norm = float(max_norm) clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1) for g in grads: g.mul_(clip_coef) return total_norm def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float("-inf")).type_as(t) def _match_types(arg1, arg2): """Convert the numerical argument to the same type as the other argument""" def upgrade(arg_number, arg_structure): if isinstance(arg_structure, tuple): return tuple([arg_number] * len(arg_structure)) elif isinstance(arg_structure, dict): arg = copy.deepcopy(arg_structure) for k in arg: arg[k] = upgrade(arg_number, arg_structure[k]) return arg else: return arg_number if isinstance(arg1, float) or isinstance(arg1, int): return upgrade(arg1, arg2), arg2 elif isinstance(arg2, float) or isinstance(arg2, int): return arg1, upgrade(arg2, arg1) return arg1, arg2 def resolve_max_positions(*args): """Resolve max position constraints from multiple sources.""" def map_value_update(d1, d2): updated_value = copy.deepcopy(d1) for key in d2: if key not in updated_value: updated_value[key] = d2[key] else: updated_value[key] = min(d1[key], d2[key]) return updated_value def nullsafe_min(l): minim = None for item in l: if minim is None: minim = item elif item is not None and item < minim: minim = item return minim max_positions = None for arg in args: if max_positions is None: max_positions = arg elif arg is not None: max_positions, arg = _match_types(max_positions, arg) if isinstance(arg, float) or isinstance(arg, int): max_positions = min(max_positions, arg) elif isinstance(arg, dict): max_positions = map_value_update(max_positions, arg) else: max_positions = tuple(map(nullsafe_min, zip(max_positions, arg))) return max_positions def import_user_module(args): module_path = getattr(args, "user_dir", None) if module_path is not None: module_path = os.path.abspath(args.user_dir) if not os.path.exists(module_path) and not os.path.isfile(os.path.dirname(module_path)): fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: fairseq_rel_path = os.path.join( os.path.dirname(__file__), "..", args.user_dir ) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: raise FileNotFoundError(module_path) # ensure that user modules are only imported once import_user_module.memo = getattr(import_user_module, "memo", set()) if module_path not in import_user_module.memo: import_user_module.memo.add(module_path) module_parent, module_name = os.path.split(module_path) if module_name not in sys.modules: sys.path.insert(0, module_parent) 
importlib.import_module(module_name) else: raise ImportError( "Failed to import --user-dir={} because the corresponding module name " "({}) is not globally unique. Please rename the directory to " "something unique and try again.".format(module_path, module_name) ) def softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32) def log_softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.log_softmax(x.float(), dim=dim) else: return F.log_softmax(x, dim=dim, dtype=torch.float32) def get_perplexity(loss, round=2, base=2): if loss is None: return 0.0 try: return safe_round(base ** loss, round) except OverflowError: return float("inf") def deprecation_warning(message, stacklevel=3): # don't use DeprecationWarning, since it's ignored by default warnings.warn(message, stacklevel=stacklevel) def get_activation_fn(activation: str) -> Callable: """ Returns the activation function corresponding to `activation` """ if activation == "relu": return F.relu elif activation == "gelu": return gelu elif activation == "gelu_fast": deprecation_warning( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x else: raise RuntimeError("--activation-fn {} not supported".format(activation)) def get_available_activation_fns() -> List: return [ "relu", "gelu", "gelu_fast", # deprecated "gelu_accurate", "tanh", "linear", ] @contextlib.contextmanager def model_eval(model): is_training = model.training model.eval() yield model.train(is_training) def has_parameters(module): try: next(module.parameters()) return True except StopIteration: return False def get_rng_state(): state = {"torch_rng_state": torch.get_rng_state()} if xm is not None: state["xla_rng_state"] = xm.get_rng_state() if torch.cuda.is_available(): state["cuda_rng_state"] = torch.cuda.get_rng_state() return state def set_rng_state(state): torch.set_rng_state(state["torch_rng_state"]) if xm is not None: xm.set_rng_state(state["xla_rng_state"]) if torch.cuda.is_available(): torch.cuda.set_rng_state(state["cuda_rng_state"]) class set_torch_seed(object): def __init__(self, seed): assert isinstance(seed, int) self.rng_state = get_rng_state() torch.manual_seed(seed) if xm is not None: xm.set_rng_state(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) def __enter__(self): return self def __exit__(self, *exc): set_rng_state(self.rng_state) def parse_alignment(line): """ Parses a single line from the alingment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m). 
""" alignments = line.strip().split() parsed_alignment = torch.IntTensor(2 * len(alignments)) for idx, alignment in enumerate(alignments): src_idx, tgt_idx = alignment.split("-") parsed_alignment[2 * idx] = int(src_idx) parsed_alignment[2 * idx + 1] = int(tgt_idx) return parsed_alignment def get_token_to_word_mapping(tokens, exclude_list): n = len(tokens) word_start = [int(token not in exclude_list) for token in tokens] word_idx = list(accumulate(word_start)) token_to_word = {i: word_idx[i] for i in range(n)} return token_to_word def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_invalid = ( ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad]) tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad]) alignment = [] if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent): attn_valid = attn[tgt_valid] attn_valid[:, src_invalid] = float("-inf") _, src_indices = attn_valid.max(dim=1) for tgt_idx, src_idx in zip(tgt_valid, src_indices): alignment.append( ( src_token_to_word[src_idx.item()] - 1, tgt_token_to_word[tgt_idx.item()] - 1, ) ) return alignment def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad)).nonzero(as_tuple=False) ) src_valid = ( ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1) ) alignment = [] if len(tgt_valid) != 0 and len(src_valid) != 0: attn_valid = attn[tgt_valid, src_valid] alignment = [ ["{:.6f}".format(p) for p in src_probs.tolist()] for src_probs in attn_valid ] return alignment def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. 
""" if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def get_tpu_device(): return xm.xla_device() def tpu_data_loader(itr): import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl xm.rendezvous("tpu_data_loader") # wait for all workers xm.mark_step() device = xm.xla_device() return iterators.CountingIterator( pl.ParallelLoader(itr, [device]).per_device_loader(device), start=getattr(itr, "n", 0), total=len(itr), ) class CudaEnvironment(object): def __init__(self): cur_device = torch.cuda.current_device() prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device)) self.name = prop.name self.major = prop.major self.minor = prop.minor self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024 @staticmethod def pretty_print_cuda_env_list(cuda_env_list): """ Given a list of CudaEnviorments, pretty print them """ num_workers = len(cuda_env_list) center = "CUDA enviroments for all {} workers".format(num_workers) banner_len = 40 - len(center) // 2 first_line = "*" * banner_len + center + "*" * banner_len logger.info(first_line) for r, env in enumerate(cuda_env_list): logger.info( "rank {:3d}: ".format(r) + "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor) + "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB) + "name = {:40s}".format(env.name) ) logger.info(first_line) def csv_str_list(x): return x.split(",") def eval_str_list(x, type=float): if x is None: return None if isinstance(x, str): x = eval(x) try: return list(map(type, x)) except TypeError: return [type(x)] def eval_str_dict(x, type=dict): if x is None: return None if isinstance(x, str): x = eval(x) return x def eval_bool(x, default=False): if x is None: return default try: return bool(eval(x)) except TypeError: return default
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/utils.py
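A few of the pure tensor helpers above are easy to exercise directly. A small sketch, assuming fairseq and torch are installed, using a tiny batch with pad index 1:

import torch
from fairseq import utils

pad = 1
# Two sentences; the second one is left-padded with the pad index.
src_tokens = torch.tensor([[4, 5, 6],
                           [1, 4, 5]])

# Positions start at padding_idx + 1 = 2 and padding keeps the pad index.
print(utils.make_positions(src_tokens, padding_idx=pad))
# tensor([[2, 3, 4],
#         [1, 2, 3]])

# Move padding from the left to the right of each sentence.
print(utils.convert_padding_direction(src_tokens, pad, left_to_right=True))
# tensor([[4, 5, 6],
#         [4, 5, 1]])

# Drop padding from a single sentence.
print(utils.strip_pad(src_tokens[1], pad))              # tensor([4, 5])

# Combine max-position constraints from several sources (None means "no limit").
print(utils.resolve_max_positions((1024, 1024), (512, None)))   # (512, 1024)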
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import collections import contextlib import logging import os import re import traceback from collections import OrderedDict from typing import Any, Dict, Optional, Union import torch from fairseq.dataclass.configs import CheckpointConfig, FairseqConfig from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, overwrite_args_by_name, ) from fairseq.file_io import PathManager from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig, open_dict logger = logging.getLogger(__name__) def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss): from fairseq import meters # only one worker should attempt to create the required dir if cfg.distributed_rank == 0: os.makedirs(cfg.save_dir, exist_ok=True) prev_best = getattr(save_checkpoint, "best", val_loss) if val_loss is not None: best_function = max if cfg.maximize_best_checkpoint_metric else min save_checkpoint.best = best_function(val_loss, prev_best) if cfg.no_save: return trainer.consolidate_optimizer() if not trainer.is_data_parallel_master: return write_timer = meters.StopwatchMeter() write_timer.start() epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() updates = trainer.get_num_updates() logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates") def is_better(a, b): return a >= b if cfg.maximize_best_checkpoint_metric else a <= b suffix = cfg.checkpoint_suffix or "" checkpoint_conds = collections.OrderedDict() checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0 ) checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( not end_of_epoch and cfg.save_interval_updates > 0 and updates % cfg.save_interval_updates == 0 ) checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( not hasattr(save_checkpoint, "best") or is_better(val_loss, save_checkpoint.best) ) if val_loss is not None and cfg.keep_best_checkpoints > 0: checkpoint_conds[ "checkpoint.best_{}_{:.2f}.pt".format(cfg.best_checkpoint_metric, val_loss) ] = not hasattr(save_checkpoint, "best") or is_better( val_loss, save_checkpoint.best ) checkpoint_conds[ "checkpoint_last{}.pt".format(suffix) ] = not cfg.no_last_checkpoints extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} if hasattr(save_checkpoint, "best"): extra_state.update({"best": save_checkpoint.best}) checkpoints = [ os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond ] if len(checkpoints) > 0: trainer.save_checkpoint(checkpoints[0], extra_state) for cp in checkpoints[1:]: assert PathManager.copy( checkpoints[0], cp, overwrite=True ), f"Failed to copy {checkpoints[0]} to {cp}" write_timer.stop() logger.info( "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( checkpoints[0], epoch, updates, val_loss, write_timer.sum ) ) if not end_of_epoch and cfg.keep_interval_updates > 0: # remove old checkpoints; checkpoints are sorted in descending order checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt" ) for old_chk in checkpoints[cfg.keep_interval_updates :]: if os.path.lexists(old_chk): os.remove(old_chk) if cfg.keep_last_epochs > 0: # remove old epoch checkpoints; checkpoints are sorted in 
descending order checkpoints = checkpoint_paths(cfg.save_dir, pattern=r"checkpoint(\d+)\.pt") for old_chk in checkpoints[cfg.keep_last_epochs :]: if os.path.lexists(old_chk): os.remove(old_chk) if cfg.keep_best_checkpoints > 0: # only keep the best N checkpoints according to validation metric checkpoints = checkpoint_paths( cfg.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format( cfg.best_checkpoint_metric ), ) if not cfg.maximize_best_checkpoint_metric: checkpoints = checkpoints[::-1] for old_chk in checkpoints[cfg.keep_best_checkpoints :]: if os.path.lexists(old_chk): os.remove(old_chk) def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): """ Load a checkpoint and restore the training iterator. *passthrough_args* will be passed through to ``trainer.get_train_iterator``. """ reset_optimizer = cfg.reset_optimizer reset_lr_scheduler = cfg.reset_lr_scheduler optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) reset_meters = cfg.reset_meters reset_dataloader = cfg.reset_dataloader if cfg.finetune_from_model is not None and ( reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader ): raise ValueError( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" ) suffix = cfg.checkpoint_suffix if ( cfg.restore_file == "checkpoint_last.pt" ): # default value of restore_file is 'checkpoint_last.pt' checkpoint_path = os.path.join( cfg.save_dir, "checkpoint_last{}.pt".format(suffix) ) first_launch = not PathManager.exists(checkpoint_path) if cfg.finetune_from_model is not None and first_launch: # if there is no last checkpoint to restore, start the finetune from pretrained model # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. if PathManager.exists(cfg.finetune_from_model): checkpoint_path = cfg.finetune_from_model reset_optimizer = True reset_lr_scheduler = True reset_meters = True reset_dataloader = True logger.info( f"loading pretrained model from {checkpoint_path}: " "optimizer, lr scheduler, meters, dataloader will be reset" ) else: raise ValueError( f"--funetune-from-model {cfg.finetune_from_model} does not exist" ) elif cfg.model_parallel_size > 1: checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt") else: checkpoint_path = cfg.restore_file if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model: raise ValueError( "--finetune-from-model and --restore-file (non-default value) " "can not be specified together: " + str(cfg) ) extra_state = trainer.load_checkpoint( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, reset_meters=reset_meters, ) if ( extra_state is not None and "best" in extra_state and not reset_optimizer and not reset_meters ): save_checkpoint.best = extra_state["best"] if extra_state is not None and not reset_dataloader: # restore iterator from checkpoint itr_state = extra_state["train_iterator"] epoch_itr = trainer.get_train_iterator( epoch=itr_state["epoch"], load_dataset=True, **passthrough_args ) epoch_itr.load_state_dict(itr_state) else: epoch_itr = trainer.get_train_iterator( epoch=1, load_dataset=True, **passthrough_args ) trainer.lr_step(epoch_itr.epoch) return extra_state, epoch_itr def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False): """Loads a checkpoint to CPU (with upgrading for backward compatibility). 
If doing single-GPU training or if the checkpoint is only being loaded by at most one process on each node (current default behavior is for only rank 0 to read the checkpoint from disk), load_on_all_ranks should be False to avoid errors from torch.distributed not having been initialized or torch.distributed.barrier() hanging. If all processes on each node may be loading the checkpoint simultaneously, load_on_all_ranks should be set to True to avoid I/O conflicts. There's currently no support for > 1 but < all processes loading the checkpoint on each node. """ local_path = PathManager.get_local_path(path) # The locally cached file returned by get_local_path() may be stale for # remote files that are periodically updated/overwritten (ex: # checkpoint_last.pt) - so we remove the local copy, sync across processes # (if needed), and then download a fresh copy. if local_path != path and PathManager.path_requires_pathmanager(path): try: os.remove(local_path) except FileNotFoundError: # With potentially multiple processes removing the same file, the # file being missing is benign (missing_ok isn't available until # Python 3.8). pass if load_on_all_ranks: torch.distributed.barrier() local_path = PathManager.get_local_path(path) with open(local_path, "rb") as f: state = torch.load(f, map_location=torch.device("cpu")) if "args" in state and state["args"] is not None and arg_overrides is not None: args = state["args"] for arg_name, arg_val in arg_overrides.items(): setattr(args, arg_name, arg_val) if "cfg" in state and state["cfg"] is not None and arg_overrides is not None: overwrite_args_by_name(state["cfg"], arg_overrides) state = _upgrade_state_dict(state) return state def load_model_ensemble( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, ): """Loads an ensemble of models. 
Args: filenames (List[str]): checkpoint files to load arg_overrides (Dict[str,Any], optional): override model args that were used during model training task (fairseq.tasks.FairseqTask, optional): task to use for loading """ assert not ( strict and num_shards > 1 ), "Cannot load state dict with strict=True and checkpoint shards > 1" ensemble, args, _task = load_model_ensemble_and_task( filenames, arg_overrides, task, strict, suffix, num_shards, state, ) return ensemble, args def load_model_ensemble_and_task( filenames, arg_overrides: Optional[Dict[str, Any]] = None, task=None, strict=True, suffix="", num_shards=1, state=None, ): assert state is None or len(filenames) == 1 from fairseq import tasks assert not ( strict and num_shards > 1 ), "Cannot load state dict with strict=True and checkpoint shards > 1" ensemble = [] cfg = None for filename in filenames: orig_filename = filename assert num_shards > 0 for shard_idx in range(num_shards): if num_shards == 1: filename = filename.replace(".pt", suffix + ".pt") else: filename = orig_filename[:-3] + f"_part{shard_idx}.pt" if not PathManager.exists(filename): raise IOError("Model file not found: {}".format(filename)) if state is None: state = load_checkpoint_to_cpu(filename, arg_overrides) if "args" in state and state["args"] is not None: cfg = convert_namespace_to_omegaconf(state["args"]) elif "cfg" in state and state["cfg"] is not None: cfg = state["cfg"] else: raise RuntimeError( f"Neither args nor cfg exist in state keys = {state.keys()}" ) if task is None: task = tasks.setup_task(cfg.task) # build model for ensemble model = task.build_model(cfg.model) model.load_state_dict(state["model"], strict=strict, model_cfg=cfg.model) # reset state so it gets loaded for the next model in ensemble state = None ensemble.append(model) return ensemble, cfg, task def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"): """Retrieves all checkpoints found in `path` directory. Checkpoints are identified by matching filename to the specified pattern. If the pattern contains groups, the result will be sorted by the first group in descending order. 
""" pt_regexp = re.compile(pattern) files = os.listdir(path) entries = [] for i, f in enumerate(files): m = pt_regexp.fullmatch(f) if m is not None: idx = float(m.group(1)) if len(m.groups()) > 0 else i entries.append((idx, m.group(0))) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] def torch_persistent_save(obj, f): if isinstance(f, str): with PathManager.open(f, "wb") as h: torch_persistent_save(obj, h) return for i in range(3): try: return torch.save(obj, f) except Exception: if i == 2: logger.error(traceback.format_exc()) def save_state( filename, cfg: FairseqConfig, model_state_dict, criterion, optimizer, lr_scheduler, num_updates, optim_history=None, extra_state=None, **kwargs, ): from fairseq import utils if optim_history is None: optim_history = [] if extra_state is None: extra_state = {} state_dict = { "cfg": cfg, "args": kwargs.get("args", None), "model": model_state_dict or {}, "optimizer_history": optim_history + [ { "criterion_name": criterion.__class__.__name__, "optimizer_name": optimizer.__class__.__name__, "lr_scheduler_state": lr_scheduler.state_dict(), "num_updates": num_updates, } ], "extra_state": extra_state, } if utils.has_parameters(criterion): state_dict["criterion"] = criterion.state_dict() if cfg is None: cfg = state_dict["args"] assert cfg is not None, "must provide cfg or args" if isinstance(cfg, DictConfig): no_save_optimizer_state = cfg.checkpoint.no_save_optimizer_state else: no_save_optimizer_state = cfg.no_save_optimizer_state if not no_save_optimizer_state: state_dict["last_optimizer_state"] = optimizer.state_dict() # keep everything on CPU state_dict = utils.move_to_cpu(state_dict) if PathManager.supports_rename(filename): # do atomic save with PathManager.open(filename + ".tmp", "wb") as f: torch_persistent_save(state_dict, f) PathManager.rename(filename + ".tmp", filename) else: # fallback to non-atomic save with PathManager.open(filename, "wb") as f: torch_persistent_save(state_dict, f) def _upgrade_state_dict(state): """Helper for upgrading old model checkpoints.""" from fairseq import models, registry, tasks # add optimizer_history if "optimizer_history" not in state: state["optimizer_history"] = [ {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]} ] state["last_optimizer_state"] = state["optimizer"] del state["optimizer"] del state["best_loss"] # move extra_state into sub-dictionary if "epoch" in state and "extra_state" not in state: state["extra_state"] = { "epoch": state["epoch"], "batch_offset": state["batch_offset"], "val_loss": state["val_loss"], } del state["epoch"] del state["batch_offset"] del state["val_loss"] # reduce optimizer history's memory usage (only keep the last state) if "optimizer" in state["optimizer_history"][-1]: state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"] for optim_hist in state["optimizer_history"]: del optim_hist["optimizer"] # record the optimizer class name if "optimizer_name" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG" # move best_loss into lr_scheduler_state if "lr_scheduler_state" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["lr_scheduler_state"] = { "best": state["optimizer_history"][-1]["best_loss"] } del state["optimizer_history"][-1]["best_loss"] # keep track of number of updates if "num_updates" not in state["optimizer_history"][-1]: state["optimizer_history"][-1]["num_updates"] = 0 # old model checkpoints may not have separate source/target 
positions if hasattr(state["args"], "max_positions") and not hasattr( state["args"], "max_source_positions" ): state["args"].max_source_positions = state["args"].max_positions state["args"].max_target_positions = state["args"].max_positions # use stateful training data iterator if "train_iterator" not in state["extra_state"]: state["extra_state"]["train_iterator"] = { "epoch": state["extra_state"]["epoch"], "iterations_in_epoch": state["extra_state"].get("batch_offset", 0), } # backward compatibility, cfg updates if "args" in state and state["args"] is not None: # default to translation task if not hasattr(state["args"], "task"): state["args"].task = "translation" # --raw-text and --lazy-load are deprecated if getattr(state["args"], "raw_text", False): state["args"].dataset_impl = "raw" elif getattr(state["args"], "lazy_load", False): state["args"].dataset_impl = "lazy" # epochs start at 1 if state["extra_state"]["train_iterator"] is not None: state["extra_state"]["train_iterator"]["epoch"] = max( state["extra_state"]["train_iterator"].get("epoch", 1), 1 ) # --remove-bpe ==> --postprocess if hasattr(state["args"], "remove_bpe"): state["args"].post_process = state["args"].remove_bpe # --min-lr ==> --stop-min-lr if hasattr(state["args"], "min_lr"): state["args"].stop_min_lr = state["args"].min_lr del state["args"].min_lr # binary_cross_entropy => wav2vec criterion if ( hasattr(state["args"], "criterion") and state["args"].criterion == "binary_cross_entropy" ): state["args"].criterion = "wav2vec" # speech_pretraining => audio pretraining if ( hasattr(state["args"], "task") and state["args"].task == "speech_pretraining" ): state["args"].task = "audio_pretraining" # audio_cpc => wav2vec if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc": state["args"].arch = "wav2vec" # convert legacy float learning rate to List[float] if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float): state["args"].lr = [state["args"].lr] state["cfg"] = convert_namespace_to_omegaconf(state["args"]) if "cfg" in state and state["cfg"] is not None: with open_dict(state["cfg"]): # any upgrades for Hydra-based configs pass return state def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]): """Prune the given state_dict if desired for LayerDrop (https://arxiv.org/abs/1909.11556). Training with LayerDrop allows models to be robust to pruning at inference time. This function prunes state_dict to allow smaller models to be loaded from a larger model and re-maps the existing state_dict for this to occur. It's called by functions that load models from checkpoints and does not need to be called directly. """ arch = None if model_cfg is not None: arch = ( model_cfg._name if isinstance(model_cfg, DictConfig) else getattr(model_cfg, "arch", None) ) if not model_cfg or arch is None or arch == "ptt_transformer": # args should not be none, but don't crash if it is. 
return state_dict encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None) decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None) if not encoder_layers_to_keep and not decoder_layers_to_keep: return state_dict # apply pruning logger.info( "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" ) def create_pruning_pass(layers_to_keep, layer_name): keep_layers = sorted( int(layer_string) for layer_string in layers_to_keep.split(",") ) mapping_dict = {} for i in range(len(keep_layers)): mapping_dict[str(keep_layers[i])] = str(i) regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) return {"substitution_regex": regex, "mapping_dict": mapping_dict} pruning_passes = [] if encoder_layers_to_keep: pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) if decoder_layers_to_keep: pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) new_state_dict = {} for layer_name in state_dict.keys(): match = re.search(r"\.layers\.(\d+)\.", layer_name) # if layer has no number in it, it is a supporting layer, such as an # embedding if not match: new_state_dict[layer_name] = state_dict[layer_name] continue # otherwise, layer should be pruned. original_layer_number = match.group(1) # figure out which mapping dict to replace from for pruning_pass in pruning_passes: if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[ "substitution_regex" ].search(layer_name): new_layer_number = pruning_pass["mapping_dict"][original_layer_number] substitution_match = pruning_pass["substitution_regex"].search( layer_name ) new_state_key = ( layer_name[: substitution_match.start(1)] + new_layer_number + layer_name[substitution_match.end(1) :] ) new_state_dict[new_state_key] = state_dict[layer_name] # Since layers are now pruned, *_layers_to_keep are no longer needed. # This is more of "It would make it work fix" rather than a proper fix. if isinstance(model_cfg, DictConfig): context = open_dict(model_cfg) else: context = contextlib.ExitStack() with context: if hasattr(model_cfg, "encoder_layers_to_keep"): model_cfg.encoder_layers_to_keep = None if hasattr(model_cfg, "decoder_layers_to_keep"): model_cfg.decoder_layers_to_keep = None return new_state_dict def load_pretrained_component_from_model( component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str ): """ Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the provided `component` object. If state_dict fails to load, there may be a mismatch in the architecture of the corresponding `component` found in the `checkpoint` file. """ if not PathManager.exists(checkpoint): raise IOError("Model file not found: {}".format(checkpoint)) state = load_checkpoint_to_cpu(checkpoint) if isinstance(component, FairseqEncoder): component_type = "encoder" elif isinstance(component, FairseqDecoder): component_type = "decoder" else: raise ValueError( "component to load must be either a FairseqEncoder or " "FairseqDecoder. Loading other component types are not supported." 
) component_state_dict = OrderedDict() for key in state["model"].keys(): if key.startswith(component_type): # encoder.input_layers.0.0.weight --> input_layers.0.0.weight component_subkey = key[len(component_type) + 1 :] component_state_dict[component_subkey] = state["model"][key] component.load_state_dict(component_state_dict, strict=True) return component def verify_checkpoint_directory(save_dir: str) -> None: if not os.path.exists(save_dir): os.makedirs(save_dir, exist_ok=True) temp_file_path = os.path.join(save_dir, "dummy") try: with open(temp_file_path, "w"): pass except OSError as e: logger.warning( "Unable to access checkpoint save directory: {}".format(save_dir) ) raise e else: os.remove(temp_file_path)
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/checkpoint_utils.py
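A hedged sketch of loading a trained checkpoint with the helpers above; the checkpoint path and the "data" override are placeholders that depend on how the model was trained.

from fairseq import checkpoint_utils

# Placeholder path; any checkpoint written by save_checkpoint() above would do.
ckpt = "/path/to/checkpoints/checkpoint_best.pt"

# Inspect the raw state dict (upgraded for backward compatibility) without building a model.
state = checkpoint_utils.load_checkpoint_to_cpu(ckpt, arg_overrides={"data": "/path/to/data-bin"})
print(state.keys())   # typically includes cfg/args, model, extra_state, optimizer_history, ...

# Build the model(s) and the task in one call; overrides are applied to the stored config.
models, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    [ckpt],
    arg_overrides={"data": "/path/to/data-bin"},
)
model = models[0]
model.eval()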
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from fairseq.modules.quantization import pq, quantization_options, scalar from omegaconf import DictConfig logger = logging.getLogger(__name__) def quantize_model_scalar(model, model_cfg: DictConfig): quant_noise_scalar = getattr(model_cfg, "quant_noise_scalar", 0) or 0 if quant_noise_scalar > 0: # quantize_model edits the model in place scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000) return model class Quantizer(object): def __init__(self, config_path, max_epoch, max_update): try: import yaml except ImportError: raise ImportError("Please install yaml with: pip install yaml") # parse config if config_path: with open(config_path) as config_file: config = quantization_options.parse_config_yaml( yaml.safe_load(config_file) ) else: config = quantization_options.parse_config_yaml({}) self.n_centroids_config = config["n_centroids"] self.block_sizes_config = config["block_sizes"] self.layers_to_quantize = config["layers_to_quantize"] # We assume that training will run for a fixed number of epochs # (or updates) and that we should train for equal durations # between iterations of PQ. num_iterations = len(self.layers_to_quantize) if max_epoch > 0: assert max_epoch % num_iterations == 0, ( "for iterative PQ, --max-epoch (={}) must be evenly divisible by " "len(layers_to_quantize) (={})".format(max_epoch, num_iterations) ) self.epoch_schedule = max_epoch // num_iterations else: self.epoch_schedule = None if max_update > 0: assert max_update % num_iterations == 0, ( "for iterative PQ, --max-update (={}) must be evenly divisible by " "len(layers_to_quantize) (={})".format(max_update, num_iterations) ) self.update_schedule = max_update // num_iterations else: self.update_schedule = None assert (self.epoch_schedule is not None) ^ ( self.update_schedule is not None ), "for iterative PQ, cannot specify both --max-update and --max-epoch" # 0 is a special value for quantization step, which will force # the first call to begin_epoch() to call step() self.quantization_step = 0 def set_trainer(self, trainer): self.trainer = trainer self.size_tracker = pq.SizeTracker(self.trainer.get_model()) def step(self): """Move to the next stage of quantization.""" if self.quantization_step >= len(self.layers_to_quantize): # Maybe we just finished the last training step or we loaded # a checkpoint for an iterative PQ model which previously # finished training. Either way, don't quantize again. 
return logger.info( "quantizing model (step={}; layers_to_quantize[step]={})".format( self.quantization_step, self.layers_to_quantize[self.quantization_step] ) ) quantized_layers = pq.quantize_model_( self.trainer.get_model(), self.size_tracker, self.layers_to_quantize, self.block_sizes_config, self.n_centroids_config, step=self.quantization_step, ) logger.info("quantized layers: {}".format(quantized_layers)) logger.info(self.size_tracker) self.quantization_step += 1 # reintialize the Trainer since model parameters have changed self.trainer.reinitialize() def begin_epoch(self, epoch): """Called at the beginning of each epoch (epochs start at 1).""" if ( ( self.epoch_schedule is not None and epoch > 0 and (epoch - 1) % self.epoch_schedule == 0 ) # we always step once in the beginning, even if using # update-based quantization or self.quantization_step == 0 ): self.step() def step_update(self, num_updates): """Called at the end of each step.""" if ( self.update_schedule is not None and num_updates > 0 and num_updates % self.update_schedule == 0 ): self.step() def state_dict(self): return { "n_centroids_config": self.n_centroids_config, "block_sizes_config": self.block_sizes_config, "layers_to_quantize": self.layers_to_quantize, "epoch_schedule": self.epoch_schedule, "update_schedule": self.update_schedule, "quantization_step": self.quantization_step, } def load_state_dict(self, state_dict): self.n_centroids_config = state_dict["n_centroids_config"] self.block_sizes_config = state_dict["block_sizes_config"] self.layers_to_quantize = state_dict["layers_to_quantize"] self.epoch_schedule = state_dict["epoch_schedule"] self.update_schedule = state_dict["update_schedule"] self.quantization_step = state_dict["quantization_step"]
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/quantization_utils.py
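A sketch of applying scalar quantization noise via quantize_model_scalar above; the toy model and the quant_noise_scalar value are illustrative, and the call assumes fairseq's scalar quantization modules import cleanly in your environment.

import argparse
import torch.nn as nn
from fairseq.quantization_utils import quantize_model_scalar

# Toy model; quantize_model_ edits supported submodules (e.g. Linear) in place.
model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))

# quantize_model_scalar only reads quant_noise_scalar from the config object,
# so a plain Namespace is enough for this sketch (0.5 is an arbitrary noise rate).
cfg = argparse.Namespace(quant_noise_scalar=0.5)

# No-op when quant_noise_scalar == 0; otherwise uses the 8-bit defaults hard-coded above.
model = quantize_model_scalar(model, cfg)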
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import logging import os import pickle import random import socket import struct import subprocess import warnings from argparse import Namespace from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional import torch import torch.distributed as dist from fairseq import utils from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig from omegaconf import open_dict try: import torch_xla.core.xla_model as xm except ImportError: xm = None # Flag to indicate if we're using Megatron # NOTE: this is a temporary hack until we move away from Megatron's model parallel init _USE_MEGATRON = False # Whether to use XLA ops (e.g., on TPUs) instead of CUDA ops. _USE_XLA = False logger = logging.getLogger(__name__) def is_master(cfg: DistributedTrainingConfig): return cfg.distributed_rank == 0 def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False): if cfg.distributed_init_method is not None or cfg.tpu: return if cfg.pipeline_model_parallel: balance_exists = ( cfg.pipeline_balance is not None or cfg.pipeline_encoder_balance is not None or cfg.pipeline_decoder_balance is not None ) devices_exist = ( cfg.pipeline_devices is not None or cfg.pipeline_encoder_devices is not None or cfg.pipeline_decoder_devices is not None ) if not balance_exists: raise ValueError( "--pipeline-balance is currently required for pipeline model parallelism" ) if not devices_exist: raise ValueError( "--pipeline-devices is currently required for pipeline model parallelism" ) cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int) if cfg.pipeline_devices is not None: cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int) num_pipeline_devices = len(set(cfg.pipeline_devices)) else: cfg.pipeline_encoder_devices = utils.eval_str_list( cfg.pipeline_encoder_devices, type=int ) cfg.pipeline_decoder_devices = utils.eval_str_list( cfg.pipeline_decoder_devices, type=int ) num_pipeline_devices = len( set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices) ) gpus_per_node = torch.cuda.device_count() assert ( gpus_per_node >= num_pipeline_devices and gpus_per_node % num_pipeline_devices == 0 ), ( "the number of unique device IDs in --pipeline-devices must evenly divide " "the number of GPUs per node (multi-node pipelining is not yet supported)" ) num_pipelines_per_node = gpus_per_node // num_pipeline_devices # support torch.distributed.launch if all( key in os.environ for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"] ): cfg.distributed_init_method = "env://" cfg.distributed_world_size = int(os.environ["WORLD_SIZE"]) cfg.distributed_rank = int(os.environ["RANK"]) # processes are created by torch.distributed.launch cfg.distributed_no_spawn = True # we can determine the init method automatically for Slurm elif cfg.distributed_port > 0: node_list = os.environ.get("SLURM_STEP_NODELIST") if node_list is None: node_list = os.environ.get("SLURM_JOB_NODELIST") if node_list is not None: try: hostnames = subprocess.check_output( ["scontrol", "show", "hostnames", node_list] ) cfg.distributed_init_method = "tcp://{host}:{port}".format( host=hostnames.split()[0].decode("utf-8"), port=cfg.distributed_port, ) nnodes = int(os.environ.get("SLURM_NNODES")) ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE") 
if ntasks_per_node is not None: ntasks_per_node = int(ntasks_per_node) else: ntasks = int(os.environ.get("SLURM_NTASKS")) nnodes = int(os.environ.get("SLURM_NNODES")) assert ntasks % nnodes == 0 ntasks_per_node = int(ntasks / nnodes) if ntasks_per_node == 1: gpus_per_node = torch.cuda.device_count() node_id = int(os.environ.get("SLURM_NODEID")) cfg.distributed_rank = node_id * gpus_per_node cfg.distributed_world_size = nnodes * gpus_per_node elif cfg.pipeline_model_parallel: assert ntasks_per_node == num_pipelines_per_node, ( "SLURM --ntasks-per-node must match number of pipelines per " "node (={})".format(num_pipelines_per_node) ) cfg.distributed_no_spawn = True # For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on # the first node, [2, 3] on the second node, etc. This # matches torch.distributed.launch. node_id = int(os.environ.get("SLURM_NODEID")) local_id = int(os.environ.get("SLURM_LOCALID")) cfg.distributed_rank = node_id * num_pipelines_per_node + local_id # In the above example, device_id will always be in [0, 1], # which also matches torch.distributed.launch. cfg.device_id = local_id # We also want to set distributed_world_size to be the total # number of pipelines across all nodes. cfg.distributed_world_size = nnodes * num_pipelines_per_node else: assert ntasks_per_node == cfg.distributed_world_size // nnodes cfg.distributed_no_spawn = True cfg.distributed_rank = int(os.environ.get("SLURM_PROCID")) cfg.device_id = int(os.environ.get("SLURM_LOCALID")) except subprocess.CalledProcessError as e: # scontrol failed raise e except FileNotFoundError: # Slurm is not installed pass elif cfg.distributed_world_size > 1 or force_distributed: # fallback for single node with multiple GPUs assert ( cfg.distributed_world_size <= torch.cuda.device_count() ), f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices" port = random.randint(10000, 20000) cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port) if cfg.pipeline_model_parallel: if not cfg.distributed_no_spawn: # When distributed_no_spawn is False, we expect distributed_rank and # distributed_world_size to be based on the total number of GPUs, so # we need to correct them to be based on the number of pipelines. assert cfg.distributed_world_size % num_pipeline_devices == 0 cfg.distributed_world_size = ( cfg.distributed_world_size // num_pipeline_devices ) # In the case of 4-way MP on nodes with 8 GPUs, we want # distributed_rank to be the starting GPU index for each pipeline # i.e., 0, 2, ...
assert cfg.distributed_rank % gpus_per_node == 0 assert cfg.distributed_rank % num_pipeline_devices == 0 with open_dict(cfg): cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices # launch one process per pipeline cfg.distributed_num_procs = num_pipelines_per_node # if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0 # and 4, indicating the starting device IDs for each pipeline cfg.device_id *= num_pipeline_devices if cfg.device_id > 0: # if there's multiple pipelines on a node (e.g., 4-way MP on an 8 # GPU node), we need to adjust pipeline_devices accordingly logger.debug( "setting CUDA device={} on rank {}".format( cfg.device_id, cfg.distributed_rank ) ) torch.cuda.set_device(cfg.device_id) with open_dict(cfg): cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices] logger.info( "setting pipeline_devices={} on rank {}".format( cfg.pipeline_devices, cfg.distributed_rank ) ) elif not cfg.distributed_no_spawn: with open_dict(cfg): cfg.distributed_num_procs = min( torch.cuda.device_count(), cfg.distributed_world_size ) def distributed_init(cfg: FairseqConfig): if isinstance(cfg, Namespace): from fairseq.dataclass.utils import convert_namespace_to_omegaconf cfg = convert_namespace_to_omegaconf(cfg) if not cfg.common.tpu: if torch.distributed.is_available() and torch.distributed.is_initialized(): warnings.warn( "Distributed is already initialized, cannot initialize twice!" ) else: logger.info( "distributed init (rank {}): {}".format( cfg.distributed_training.distributed_rank, cfg.distributed_training.distributed_init_method, ) ) dist.init_process_group( backend=cfg.distributed_training.distributed_backend, init_method=cfg.distributed_training.distributed_init_method, world_size=cfg.distributed_training.distributed_world_size, rank=cfg.distributed_training.distributed_rank, ) logger.info( "initialized host {} as rank {}".format( socket.gethostname(), cfg.distributed_training.distributed_rank, ) ) # perform a dummy all-reduce to initialize the NCCL communicator if torch.cuda.is_available(): dist.all_reduce(torch.zeros(1).cuda()) cfg.distributed_training.distributed_rank = torch.distributed.get_rank() else: assert xm.xrt_world_size() == cfg.distributed_training.distributed_world_size global _USE_XLA _USE_XLA = True cfg.distributed_training.device_id = xm.get_local_ordinal() cfg.distributed_training.distributed_rank = xm.get_ordinal() xm.rendezvous("distributed_init") # wait for all workers xm.mark_step() if is_master(cfg.distributed_training): logging.getLogger().setLevel(logging.INFO) else: logging.getLogger().setLevel(logging.WARNING) if cfg.common.model_parallel_size > 1: try: from fairseq.model_parallel.megatron.mpu import ( initialize_model_parallel, model_parallel_cuda_manual_seed, ) except ImportError: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) global _USE_MEGATRON _USE_MEGATRON = True initialize_model_parallel(cfg.common.model_parallel_size) model_parallel_cuda_manual_seed(cfg.common.seed) model_part_number = get_model_parallel_rank() cfg.checkpoint.checkpoint_suffix += "-model_part-{0}".format(model_part_number) return cfg.distributed_training.distributed_rank def distributed_main(i, main, cfg: FairseqConfig, kwargs): cfg.distributed_training.device_id = i if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_rank is None: # 
torch.multiprocessing.spawn cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i cfg.distributed_training.distributed_rank = distributed_init(cfg) after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None) if after_distributed_init_fn: cfg = after_distributed_init_fn(cfg) main(cfg, **kwargs) def call_main(cfg: FairseqConfig, main, **kwargs): if cfg.distributed_training.distributed_init_method is None: infer_init_method(cfg.distributed_training) if cfg.distributed_training.distributed_init_method is not None: # distributed training if not cfg.distributed_training.distributed_no_spawn: start_rank = cfg.distributed_training.distributed_rank cfg.distributed_training.distributed_rank = None # assign automatically kwargs["start_rank"] = start_rank torch.multiprocessing.spawn( fn=distributed_main, args=(main, cfg, kwargs), nprocs=min( torch.cuda.device_count(), cfg.distributed_training.distributed_world_size, ), ) else: distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs) elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1: import torch_xla.distributed.xla_multiprocessing as xmp torch.multiprocessing.set_sharing_strategy("file_system") xmp.spawn( fn=distributed_main, args=(main, cfg, kwargs), nprocs=8, # use all 8 TPU cores ) else: # single GPU main main(cfg, **kwargs) def use_xla(): global _USE_XLA return _USE_XLA def new_groups(grouped_ranks: List[List[int]]): if use_xla(): return ("tpu", grouped_ranks) else: groups = [dist.new_group(g) for g in grouped_ranks] my_group_idx = _find_my_group_index(grouped_ranks) return groups[my_group_idx] def _find_my_group_index(grouped_ranks): my_rank = get_global_rank() for i, group in enumerate(grouped_ranks): if my_rank in group: return i raise RuntimeError def _find_my_group(grouped_ranks): index = _find_my_group_index(grouped_ranks) return grouped_ranks[index] def get_rank(group): if use_xla(): assert group[0] == "tpu" my_group = _find_my_group(group[1]) return my_group.index(get_global_rank()) else: return dist.get_rank(group=group) def get_world_size(group): if use_xla(): assert group[0] == "tpu" my_group = _find_my_group(group[1]) return len(my_group) elif torch.distributed.is_initialized(): return dist.get_world_size(group=group) else: return 1 def get_global_group(): if use_xla(): return new_groups([list(range(get_global_world_size()))]) elif torch.distributed.is_initialized(): if not hasattr(get_global_group, "_global_group"): # ideally we could use torch.distributed.group.WORLD, but it seems # to cause random NCCL hangs in some cases get_global_group._global_group = dist.new_group() return get_global_group._global_group else: return None def get_global_rank(): if use_xla(): return xm.get_ordinal() elif torch.distributed.is_initialized(): return torch.distributed.get_rank() else: return 0 def get_global_world_size(): if use_xla(): return xm.xrt_world_size() elif torch.distributed.is_initialized(): return torch.distributed.get_world_size() else: return 1 def get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" global _USE_MEGATRON if _USE_MEGATRON: from fairseq.model_parallel.megatron import mpu return mpu.get_data_parallel_group() else: return get_global_group() def get_data_parallel_rank(): """Return my rank for the data parallel group.""" return get_rank(get_data_parallel_group()) def get_data_parallel_world_size(): """Return world size for the data parallel group.""" return get_world_size(get_data_parallel_group()) def 
get_model_parallel_group(): global _USE_MEGATRON if _USE_MEGATRON: from fairseq.model_parallel.megatron import mpu return mpu.get_model_parallel_group() else: return None def get_model_parallel_rank(): """Return my rank for the model parallel group.""" return get_rank(get_model_parallel_group()) def get_model_parallel_world_size(): """Return world size for the model parallel group.""" return get_world_size(get_model_parallel_group()) def all_reduce(tensor, group, op="sum"): if use_xla(): assert isinstance(group, tuple) and group[0] == "tpu" tensor = [tensor] # wrap in a list to make xm.all_reduce in-place return xm.all_reduce(op, tensor, groups=group[1])[0] else: if op == "sum": op = dist.ReduceOp.SUM elif op == "max": op = dist.ReduceOp.MAX else: raise NotImplementedError dist.all_reduce(tensor, op=op, group=group) return tensor def broadcast(tensor, src, group): if use_xla(): # XLA doesn't support broadcast, hack it with all_reduce if get_rank(group) != src: tensor.zero_() all_reduce(tensor, group) else: dist.broadcast(tensor, src=src, group=group) def all_to_all(tensor, group): """Perform an all-to-all operation on a 1D Tensor.""" assert tensor.dim() == 1 split_count = get_world_size(group=group) assert tensor.numel() % split_count == 0 if use_xla(): assert isinstance(group, tuple) and group[0] == "tpu" return xm.all_to_all( tensor, split_dimension=0, concat_dimension=0, split_count=split_count, groups=group[1], ) else: output = torch.zeros_like(tensor) dist.all_to_all_single(output, tensor, group=group) return output def all_gather(tensor, group, return_tensor=False): """Perform an all-gather operation.""" if use_xla(): result = xm.all_gather(tensor, groups=group[1]) world_size = get_world_size(group=group) result = result.view(world_size, *tensor.size()) if return_tensor: return result else: return [result[i] for i in range(world_size)] else: world_size = get_world_size(group=group) rank = get_rank(group=group) tensor_list = [ tensor if i == rank else torch.empty_like(tensor) for i in range(world_size) ] dist.all_gather(tensor_list, tensor, group=group) if return_tensor: return torch.stack(tensor_list, dim=0) else: return tensor_list def all_gather_list(data, group=None, max_size=16384): """Gathers arbitrary data from all nodes into a list. Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python data. Note that *data* must be picklable. 
Args: data (Any): data from the local worker to be gathered on other workers group: group of the collective max_size (int, optional): maximum size of the data to be gathered across workers """ if group is None: group = get_global_group() rank = get_rank(group=group) world_size = get_world_size(group=group) buffer_size = max_size * world_size if ( not hasattr(all_gather_list, "_buffer") or all_gather_list._buffer.numel() < buffer_size ): all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size) all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory() buffer = all_gather_list._buffer buffer.zero_() cpu_buffer = all_gather_list._cpu_buffer data = utils.move_to_cpu(data) enc = pickle.dumps(data) enc_size = len(enc) header_size = 4 # size of header that contains the length of the encoded data size = header_size + enc_size if size > max_size: raise ValueError( "encoded data size ({}) exceeds max_size ({})".format(size, max_size) ) header = struct.pack(">I", enc_size) cpu_buffer[:size] = torch.ByteTensor(list(header + enc)) start = rank * max_size buffer[start : start + size].copy_(cpu_buffer[:size]) all_reduce(buffer, group=group) buffer = buffer.cpu() try: result = [] for i in range(world_size): out_buffer = buffer[i * max_size : (i + 1) * max_size] (enc_size,) = struct.unpack(">I", bytes(out_buffer[:header_size].tolist())) if enc_size > 0: result.append( pickle.loads( bytes(out_buffer[header_size : header_size + enc_size].tolist()) ) ) return result except pickle.UnpicklingError: raise Exception( "Unable to unpickle data from other workers. all_gather_list requires all " "workers to enter the function together, so this error usually indicates " "that the workers have fallen out of sync somehow. Workers can fall out of " "sync if one of them runs out of memory, or if there are other conditions " "in your training script that can cause one worker to finish an epoch " "while other workers are still iterating over their portions of the data. " "Try rerunning with --ddp-backend=no_c10d and see if that helps." ) def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]: """ AllReduce a dictionary of values across workers. We separately reduce items that are already on the device and items on CPU for better performance. Args: data (Mapping[str, Any]): dictionary of data to all-reduce, but cannot be a nested dictionary device (torch.device): device for the reduction group: group of the collective """ data_keys = list(data.keys()) # We want to separately reduce items that are already on the # device and items on CPU for performance reasons. 
cpu_data = OrderedDict() device_data = OrderedDict() for k in data_keys: t = data[k] if not torch.is_tensor(t): cpu_data[k] = torch.tensor(t, dtype=torch.double) elif t.device.type != device.type: cpu_data[k] = t.to(dtype=torch.double) else: device_data[k] = t.to(dtype=torch.double) def _all_reduce_dict(data: OrderedDict): if len(data) == 0: return data buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device) all_reduce(buf, group=group) split_buf = torch.split(buf, [t.numel() for t in data.values()]) reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())] return OrderedDict(zip(data.keys(), reduced_data)) cpu_data = _all_reduce_dict(cpu_data) device_data = _all_reduce_dict(device_data) def get_from_stack(key): if key in cpu_data: return cpu_data[key] elif key in device_data: return device_data[key] raise KeyError return OrderedDict([(key, get_from_stack(key)) for key in data_keys]) def broadcast_tensors( tensors: Optional[List[torch.Tensor]], src_rank: int, group: object, dist_device: Optional[torch.device] = None, ) -> List[torch.Tensor]: """ Broadcasts a list of tensors without other (non-src) ranks needing to know the dtypes/shapes of the tensors. """ if dist_device is None: if torch.distributed.get_backend(group) == "nccl": dist_device = torch.device("cuda") else: dist_device = torch.device("cpu") # share metadata first to simplify transfer is_src_rank = (get_rank(group) == src_rank) if is_src_rank: metadata = [ {"size": t.size(), "dtype": t.dtype, "device": t.device} for t in tensors ] metadata = _broadcast_object_slow(metadata, src_rank, group, dist_device) else: metadata = _broadcast_object_slow(None, src_rank, group, dist_device) out_tensors = [] for i, meta in enumerate(metadata): if is_src_rank: tensor = tensors[i] broadcast(tensors[i].to(dist_device), src=src_rank, group=group) else: tensor = torch.zeros( [meta["size"].numel()], dtype=meta["dtype"], device=dist_device ) broadcast(tensor, src=src_rank, group=group) tensor = tensor.view(meta["size"]).to(meta["device"]) out_tensors.append(tensor) return out_tensors def broadcast_object( obj: Any, src_rank: int, group: object, dist_device: Optional[torch.device] = None, ) -> Any: """Broadcast an arbitrary Python object to other workers.""" if dist_device is None: if torch.distributed.get_backend(group) == "nccl": dist_device = torch.device("cuda") else: dist_device = torch.device("cpu") if get_rank(group) == src_rank: # split the tensors from the non-tensors so we can broadcast them # directly, avoiding unnecessary serialization/deserialization tensors = [] obj = _split_tensors_from_obj(obj, tensors) obj = _broadcast_object_slow(obj, src_rank, group, dist_device) tensors = broadcast_tensors(tensors, src_rank, group, dist_device) else: obj = _broadcast_object_slow(None, src_rank, group, dist_device) tensors = broadcast_tensors(None, src_rank, group, dist_device) return _put_tensors_in_obj(obj, tensors) def _broadcast_object_slow( obj: Any, src_rank: int, group: object, dist_device: torch.device, ) -> Any: if get_rank(group) == src_rank: # Emit data buffer = io.BytesIO() torch.save(obj, buffer) buffer = torch.ByteTensor(buffer.getbuffer()).to(dist_device) length = torch.LongTensor([len(buffer)]).to(dist_device) broadcast(length, src=src_rank, group=group) broadcast(buffer, src=src_rank, group=group) else: # Fetch from the source length = torch.LongTensor([0]).to(dist_device) broadcast(length, src=src_rank, group=group) buffer = torch.ByteTensor(int(length.item())).to(dist_device) broadcast(buffer, 
src=src_rank, group=group) buffer = io.BytesIO(buffer.cpu().numpy()) obj = torch.load(buffer, map_location="cpu") return obj @dataclass(frozen=True) class _TensorPlaceholder: index: int def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any: if torch.is_tensor(obj): placeholder = _TensorPlaceholder(index=len(tensors)) tensors.append(obj) return placeholder elif isinstance(obj, dict): return {k: _split_tensors_from_obj(v, tensors) for k, v in obj.items()} elif isinstance(obj, list): return [_split_tensors_from_obj(v, tensors) for v in obj] elif isinstance(obj, tuple): return tuple(_split_tensors_from_obj(v, tensors) for v in obj) elif isinstance(obj, set): return {_split_tensors_from_obj(v, tensors) for v in obj} else: return obj def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any: if isinstance(obj, _TensorPlaceholder): return tensors[obj.index] elif isinstance(obj, dict): return {k: _put_tensors_in_obj(v, tensors) for k, v in obj.items()} elif isinstance(obj, list): return [_put_tensors_in_obj(v, tensors) for v in obj] elif isinstance(obj, tuple): return tuple(_put_tensors_in_obj(v, tensors) for v in obj) elif isinstance(obj, set): return {_put_tensors_in_obj(v, tensors) for v in obj} else: return obj
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/distributed_utils.py
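The broadcast_object() helper above works by splitting the tensors out of an arbitrary Python object, broadcasting the tensor-free skeleton with _broadcast_object_slow(), and then re-inserting the tensors on the receiving side. Below is a minimal single-process sketch of that split/restore round-trip; it assumes this repository's fairseq package is importable, needs no distributed initialization because the two helpers are pure functions, and the example object and its keys are made up for illustration.

# Hedged sketch: exercises the placeholder round-trip used by broadcast_object().
import torch
from fairseq.distributed_utils import _put_tensors_in_obj, _split_tensors_from_obj

# Illustrative object only; any picklable nesting of dicts/lists/tuples works.
state = {"step": 3, "weights": torch.ones(2, 2), "history": [torch.zeros(4), "done"]}

tensors = []
skeleton = _split_tensors_from_obj(state, tensors)   # tensors replaced by _TensorPlaceholder entries
restored = _put_tensors_in_obj(skeleton, tensors)    # placeholders resolved back to the real tensors

assert len(tensors) == 2
assert torch.equal(restored["weights"], state["weights"])
assert restored["history"][1] == "done"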
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Utilities for working with the local dataset cache. This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_. and `huggingface <https://github.com/huggingface>`_. """ import fnmatch import json import logging import os import shutil import tarfile import tempfile from functools import partial, wraps from hashlib import sha256 from io import open try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv( "TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch") ) ) default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq") try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)) except (AttributeError, ImportError): PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path) CONFIG_NAME = "config.json" WEIGHTS_NAME = "pytorch_model.bin" logger = logging.getLogger(__name__) # pylint: disable=invalid-name def load_archive_file(archive_file): # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=None) except EnvironmentError: logger.info( "Archive name '{}' was not found in archive name list. " "We assumed '{}' was a path or URL but couldn't find any file " "associated to this path or URL.".format( archive_file, archive_file, ) ) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info( "loading archive file {} from cache at {}".format( archive_file, resolved_archive_file ) ) # Extract archive to temp dir and replace .tar.bz2 if necessary tempdir = None if not os.path.isdir(resolved_archive_file): tempdir = tempfile.mkdtemp() logger.info( "extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir ) ) ext = os.path.splitext(archive_file)[1][1:] with tarfile.open(resolved_archive_file, "r:" + ext) as archive: top_dir = os.path.commonprefix(archive.getnames()) archive.extractall(tempdir) os.remove(resolved_archive_file) shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file) shutil.rmtree(tempdir) return resolved_archive_file def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the URL's, delimited by a period. """ url_bytes = url.encode("utf-8") url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. 
""" if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def cached_path(url_or_filename, cache_dir=None): """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ("http", "https", "s3"): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError( "unable to parse {} as a URL or as a local path".format(url_or_filename) ) def split_s3_path(url): """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url, *args, **kwargs): from botocore.exceptions import ClientError try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise EnvironmentError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url): """Check ETag on S3 object.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url, temp_file): """Pull a file directly from S3.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def request_wrap_timeout(func, url): import requests for attempt, timeout in enumerate([10, 20, 40, 60, 60]): try: return func(timeout=timeout) except requests.exceptions.Timeout as e: logger.warning( "Request for %s timed-out (attempt %d). 
Retrying with a timeout of %d secs", url, attempt, timeout, exc_info=e, ) continue raise RuntimeError(f"Unable to fetch file {url}") def http_get(url, temp_file): import requests from tqdm import tqdm req = request_wrap_timeout(partial(requests.get, url, stream=True), url) content_length = req.headers.get("Content-Length") total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url, cache_dir=None): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: try: import requests response = request_wrap_timeout( partial(requests.head, url, allow_redirects=True), url ) if response.status_code != 200: etag = None else: etag = response.headers.get("ETag") except RuntimeError: etag = None filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # If we don't have a connection (etag is None) and can't identify the file # try to get the last downloaded one if not os.path.exists(cache_path) and etag is None: matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*") matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files)) if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, "wb") as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: output_string = json.dumps(meta) meta_file.write(output_string) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename): """ Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. """ collection = set() with open(filename, "r", encoding="utf-8") as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path, dot=True, lower=True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/file_utils.py
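A small sketch of the cache-naming scheme implemented by url_to_filename() above: the cached file name is the sha256 hex digest of the URL, with the sha256 of the ETag appended after a period when one is known. The URL and ETag values below are made up for illustration; the snippet assumes this repository's fairseq package is importable and needs no network access.

# Hedged sketch: naming only; cached_path()/get_from_cache() perform the actual download.
from fairseq.file_utils import url_to_filename

url = "https://example.com/model.tar.gz"        # hypothetical URL, illustration only
plain = url_to_filename(url)                    # 64-char sha256 hex digest of the URL
with_etag = url_to_filename(url, etag='"abc"')  # "<url digest>.<etag digest>"

assert len(plain) == 64
assert with_etag.startswith(plain + ".")
print(plain)
print(with_etag)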
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import List, Optional import torch import torch.nn as nn from fairseq.token_generation_constraints import ( ConstraintState, OrderedConstraintState, UnorderedConstraintState, ) from torch import Tensor class Search(nn.Module): def __init__(self, tgt_dict): super().__init__() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.src_lengths = torch.tensor(-1) self.supports_constraints = False self.stop_on_max_len = False def step( self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None ): """Take a single search step. Args: step: the current search step, starting at 0 lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step scores: (bsz x input_beam_size x step) the historical model scores of each hypothesis up to this point prev_output_tokens: (bsz x step) the previously generated output tokens original_batch_idxs: (bsz) the tensor with the batch indices, in the range [0, bsz); this is useful in case a re-ordering has been applied and we need to know the original indices Return: A tuple of (scores, indices, beams) where: scores: (bsz x output_beam_size) the scores of the chosen elements; output_beam_size can be larger than input_beam_size, e.g., we may return 2*input_beam_size to account for EOS indices: (bsz x output_beam_size) the indices of the chosen elements beams: (bsz x output_beam_size) the hypothesis ids of the chosen elements, in the range [0, input_beam_size) """ raise NotImplementedError @torch.jit.export def set_src_lengths(self, src_lengths): self.src_lengths = src_lengths @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): """Initialize constraint states for constrained decoding (if supported). Args: batch_constraints: (torch.Tensor, optional) the list of constraints, in packed form beam_size: (int) the beam size """ pass def prune_sentences(self, batch_idxs: Tensor): """ Removes constraint states for completed sentences (if supported). This is called from sequence_generator._generate() when sentences are deleted from the batch. Args: batch_idxs: Indices of *sentences* whose constraint state should be *kept*. """ pass def update_constraints(self, active_hypos: Tensor): """ Updates the constraint states by selecting the beam items that are retained. This is called at each time step of sequence_generator._generate() when the set of 2 * {beam_size} candidate hypotheses is reduced to the beam size. Args: active_hypos: (batch size, beam size) list of integers denoting, for each sentence, which beam candidate items should be kept.
""" pass class BeamSearch(Search): def __init__(self, tgt_dict): super().__init__(tgt_dict) self.constraint_states = None @torch.jit.export def step( self, step: int, lprobs, scores: Optional[Tensor], prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(bsz, -1), k=min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), ) scores_buf = top_prediction[0] indices_buf = top_prediction[1] # Project back into relative indices and beams beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) # At this point, beams_buf and indices_buf are single-dim and contain relative indices return scores_buf, indices_buf, beams_buf class PrefixConstrainedBeamSearch(Search): def __init__(self, tgt_dict, prefix_allowed_tokens_fn): super().__init__(tgt_dict) self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn self.stop_on_max_len = True @torch.jit.export def apply_mask(self, x, prev_output_tokens, original_batch_idxs): beam_size = x.shape[0] // original_batch_idxs.shape[0] original_batch_idxs = ( original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist() ) mask = torch.full_like(x, -math.inf) for sent_i, (sent, batch_i) in enumerate( zip(prev_output_tokens, original_batch_idxs) ): mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0 return mask @torch.jit.export def step( self, step: int, lprobs: Tensor, scores: Tensor, prev_output_tokens: Tensor, original_batch_idxs: Tensor, ): bsz, beam_size, vocab_size = lprobs.size() lprobs += self.apply_mask( lprobs.view(bsz * beam_size, 1, vocab_size), prev_output_tokens, original_batch_idxs, ).view(bsz, beam_size, vocab_size) if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(bsz, -1), k=min( # Take the best beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), ) scores_buf = top_prediction[0] indices_buf = top_prediction[1] beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) return scores_buf, indices_buf, beams_buf class LexicallyConstrainedBeamSearch(Search): """Implements lexically constrained beam search as described in Fast Lexically Constrained Decoding with Dynamic Beam Allocation for Neural Machine Translation. Post & Vilar, NAACL 2018. https://www.aclweb.org/anthology/N18-1119/ and Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting. Hu et al, NAACL 2019. 
https://www.aclweb.org/anthology/N19-1090/ This is accomplished by maintaining, for each beam hypothesis, a ConstraintState object (see constraints.py) that tracks which constraints have been generated and using this information to shape the beam for each input sentence. """ def __init__(self, tgt_dict, representation): super().__init__(tgt_dict) self.representation = representation self.vocab_size = len(tgt_dict) self.num_cands = 0 self.supports_constraints = True @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): self.constraint_states = [] for constraint_tensor in batch_constraints: if self.representation == "ordered": constraint_state = OrderedConstraintState.create(constraint_tensor) elif self.representation == "unordered": constraint_state = UnorderedConstraintState.create(constraint_tensor) self.constraint_states.append([constraint_state for i in range(beam_size)]) @torch.jit.export def prune_sentences(self, batch_idxs: Tensor): self.constraint_states = [ self.constraint_states[i] for i in batch_idxs.tolist() ] @torch.jit.export def update_constraints(self, active_hypos: Tensor): if self.constraint_states: batch_size = active_hypos.size(0) for sentid in range(batch_size): self.constraint_states[sentid] = [ self.constraint_states[sentid][i] for i in active_hypos[sentid] ] @torch.jit.export def step( self, step: int, lprobs: Tensor, scores: Optional[Tensor], prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): """ A constrained step builds a large candidates list from the following: - the top 2 * {beam_size} items over the whole beam - for each item in the beam - the top {each_k} (default 1) - all next constraints We then compute the constrained state of each beam item, and assign stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so on. We then sort by (stripe, score), and truncate the list at 2 * beam size. Args: step: the decoder step lprobs: (batch size, beam size, target vocab) the target-vocab distributions for each item in the beam. Return: A tuple of (scores, indices, beams, constraints) where: scores: (batch, output beam size) the scores of the chosen elements indices: (batch, output beam size) the target vocab indices of the chosen elements beams: (batch, output beam size) the 0-indexed hypothesis ids of the chosen elements constraints: (batch, output beam size) the new constraint states """ each_k = 1 device = lprobs.device batch_size, beam_size, vocab_size = lprobs.size() self.num_cands = min( # Just take the k-best. We'll get another k from the 1-best from each # row, plus more from the constraints beam_size * 2, lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad ) # STEP 0: Preliminary.
Prevent EOS for unfinished hyps across all batch items constraint_states = self.constraint_states if constraint_states and step > 0: not_finished_indices = [] for sentno, sent_constraints in enumerate(constraint_states): for beamno, state in enumerate(sent_constraints): index = sentno * beam_size + beamno if not state.finished: not_finished_indices.append(index) not_finished_indices = torch.tensor(not_finished_indices) if not_finished_indices.numel() > 0: lprobs.view(batch_size * beam_size, -1)[ not_finished_indices, self.eos ] = -math.inf if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam entry for each batch item lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(batch_size, -1), self.num_cands, ) scores_buf, indices_buf = top_prediction # Project back into relative indices and beams beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) # Short circuit if there are no constraints in this batch if not constraint_states: return scores_buf, indices_buf, beams_buf # STEP 1: get top-1 from each hypothesis across all sentences in the batch if step > 0: top_scores, top_indices = torch.topk( lprobs.view(batch_size * beam_size, -1), k=each_k, dim=1, ) top_scores = top_scores.view(batch_size, -1) top_indices = top_indices.view(batch_size, -1) scores_buf = torch.cat((scores_buf, top_scores), dim=1) indices_buf = torch.cat((indices_buf, top_indices), dim=1) new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1) beams_buf = torch.cat((beams_buf, new_beams), dim=1) # Now, process sentences in the batch one by one. new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device) new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() for sentno, states in enumerate(constraint_states): scores, indices, beams, new_states = self.step_sentence( step, sentno, lprobs[sentno], constraint_states[sentno], beams_buf[sentno].clone(), indices_buf[sentno].clone(), scores_buf[sentno].clone(), ) new_scores_buf[sentno] = scores new_indices_buf[sentno] = indices new_beams_buf[sentno] = beams self.constraint_states[sentno] = new_states return new_scores_buf, new_indices_buf, new_beams_buf @torch.jit.export def step_sentence( self, step: int, sentno: int, lprobs: Tensor, constraint_states: List[List[ConstraintState]], beams_buf: Tensor, indices_buf: Tensor, scores_buf: Tensor, ): """Does per-sentence processing. Adds all constraints for each hypothesis to the list of candidates; then removes duplicates, sorts, and dynamically stripes across the banks. All tensor inputs are collapsed to those pertaining to a single input sentence. 
""" device = lprobs.device # STEP 2: Add all constraints for each beam item for beamno, state in enumerate(constraint_states): next_tokens = torch.tensor(list(state.next_tokens()), device=device).long() if next_tokens.numel() != 0: indices_buf = torch.cat((indices_buf, next_tokens)) next_beams = ( torch.tensor(beamno, device=device) .repeat(next_tokens.size(0)) .long() ) beams_buf = torch.cat((beams_buf, next_beams)) next_values = lprobs[beamno].take(next_tokens.view(-1)) scores_buf = torch.cat((scores_buf, next_values)) # At the 0th time step, there is just one beam item if step == 0: break # STEP 3: Compute the "bank" for each candidate. This is the # number of constraints it's generated. We need this so that # we can do round-robin allocation of the beam across these # banks. If C is the number of constraints, we select the best # item in bank C, then the best in bank C-1, etc, followed by # the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so # on, until the maximum beam size. We accomplish this by # creating a sort key and striping across the banks. # Compute the new states for all candidates cands_size = indices_buf.size(0) constraint_states = [ constraint_states[beams_buf[i]].advance(indices_buf[i]) for i in range(cands_size) ] banks = torch.tensor([state.bank for state in constraint_states], device=device) # STEP 4: Sort num_constraint_tokens = len(state.tokens) # Sort by keys (bank, score) (i.e., sort banks together, and scores # within banks). AFAIK pytorch doesn't support either stable sort or # multi-key sorting, so we have to hack this. MAX_SCORE = -100 sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf sort_values, sort_indices = sort_key.sort(dim=0, descending=True) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] banks = banks[sort_indices] # Sort the constraints to follow suit constraint_states = [constraint_states[i] for i in sort_indices] # STEP 5: Remove duplicates. The topk calls (overall and # per-row) plus the per-row generation of constraints will # produce duplicates. Here we remove them. def roll(t): """Rolls a 1d tensor left by 1. [0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3] """ return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0) # We map candidates (beam, token_id) to a single dimension. # This is then shifted by 1. We can then easily identify # duplicates and create a mask that identifies unique # extensions. uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf uniques_mask = roll(uniques_mask) != uniques_mask # Use the mask to pare down the data structures scores_buf = torch.masked_select(scores_buf, uniques_mask) indices_buf = torch.masked_select(indices_buf, uniques_mask) beams_buf = torch.masked_select(beams_buf, uniques_mask) banks = torch.masked_select(banks, uniques_mask) i = 1 for mask in uniques_mask[1:]: if not mask: constraint_states.pop(i) i += mask # STEP 6: Assign IDs round-robin across banks, sort, and # truncate. Now that the candidates are sorted by (bank, # score) and uniqed, we dynamically allocate the {beam_size} # beam by striping across the candidates. These stripes will # be used as sort keys to do round-robin selection. This is # accomplished in a single pass with offsets. Sorting by # highest-banks (furthest-along hypotheses) first ensures # progress through the constraints. 
# # e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0 # OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1 # NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7 # = 0 5 10 1 6 11 13 2 7 12 3 8 # # Sorting by this then gives the following banks: # # 3 2 1 0 3 2 1 0 3 2 1 2 # # We'll take the top {beam_size} of these. stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)] stripes = torch.zeros_like(banks) cur_bank_count = -1 cur_bank = banks[0] for i, bank in enumerate(banks): if bank != cur_bank: cur_bank_count = 0 cur_bank = bank else: cur_bank_count += 1 stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count] # STEP 7: Sort by the stripes values sort_values, sort_indices = stripes.sort(dim=0) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] constraint_states = [constraint_states[i] for i in sort_indices] # STEP 8: Truncate to the candidates size! scores_buf = scores_buf[: self.num_cands] indices_buf = indices_buf[: self.num_cands] beams_buf = beams_buf[: self.num_cands] return scores_buf, indices_buf, beams_buf, constraint_states class LengthConstrainedBeamSearch(Search): def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b): super().__init__(tgt_dict) self.min_len_a = min_len_a self.min_len_b = min_len_b self.max_len_a = max_len_a self.max_len_b = max_len_b self.beam = BeamSearch(tgt_dict) self.needs_src_lengths = True def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): min_lens = self.min_len_a * self.src_lengths + self.min_len_b max_lens = self.max_len_a * self.src_lengths + self.max_len_b lprobs[step < min_lens, :, self.eos] = -math.inf lprobs[step >= max_lens, :, self.eos] = 0 return self.beam.step(step, lprobs, scores) class DiverseBeamSearch(Search): """Diverse Beam Search. See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models" for details. We only implement the Hamming Diversity penalty here, which performed best in the original paper. 
""" def __init__(self, tgt_dict, num_groups, diversity_strength): super().__init__(tgt_dict) self.num_groups = num_groups self.diversity_strength = -diversity_strength self.beam = BeamSearch(tgt_dict) @torch.jit.export def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if beam_size % self.num_groups != 0: raise ValueError( "DiverseBeamSearch requires --beam to be divisible by the number of groups" ) # initialize diversity penalty diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs) scores_G, indices_G, beams_G = [], [], [] for g in range(self.num_groups): lprobs_g = lprobs[:, g :: self.num_groups, :] scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None # apply diversity penalty if g > 0: lprobs_g = torch.add( lprobs_g, other=diversity_buf.unsqueeze(1), alpha=self.diversity_strength, ) else: lprobs_g = lprobs_g.contiguous() scores_buf, indices_buf, beams_buf = self.beam.step( step, lprobs_g, scores_g ) beams_buf.mul_(self.num_groups).add_(g) scores_G.append(scores_buf.clone()) indices_G.append(indices_buf.clone()) beams_G.append(beams_buf.clone()) # update diversity penalty diversity_buf.scatter_add_( 1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf) ) # interleave results from different groups scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1) indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1) beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1) return scores_buf, indices_buf, beams_buf class Sampling(Search): sampling_topk: int sampling_topp: float def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0): super().__init__(tgt_dict) self.sampling_topk = sampling_topk self.sampling_topp = sampling_topp def _sample_topp(self, lprobs): """Sample among the smallest set of elements whose cumulative probability mass exceeds p. See `"The Curious Case of Neural Text Degeneration" (Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_. Args: lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step Return: A tuple of (trimed_probs, truncated_indices) where: trimed_probs: (bsz x input_beam_size x ?) the model's probabilities over the elements selected to sample from. The width of the third dimension is determined by top-P. truncated_indices: (bsz x input_beam_size x ?) the indices of the chosen elements. """ probs = lprobs.exp_() # sort the last dimension (vocab dimension) in descending order sorted_probs, sorted_indices = probs.sort(descending=True) # compute a mask to indicate the words to be included in the top-P set. cumsum_probs = sorted_probs.cumsum(dim=2) mask = cumsum_probs.lt(self.sampling_topp) # note that mask was computed by 'lt'. One more word needs to be included # so that the cumulative probability mass can exceed p. cumsum_mask = mask.cumsum(dim=2) last_included = cumsum_mask[:, :, -1:] last_included.clamp_(0, mask.size()[2] - 1) mask = mask.scatter_(2, last_included, 1) # truncate unnecessary dims. max_dim = last_included.max() truncated_mask = mask[:, :, : max_dim + 1] truncated_probs = sorted_probs[:, :, : max_dim + 1] truncated_indices = sorted_indices[:, :, : max_dim + 1] # trim the words that are not in top-P by setting their probabilities # to 0, so that they would not be sampled later. 
trim_mask = ~truncated_mask trimed_probs = truncated_probs.masked_fill_(trim_mask, 0) return trimed_probs, truncated_indices @torch.jit.export def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() if self.sampling_topp > 0: # only sample from the smallest set of words whose cumulative probability mass exceeds p probs, top_indices = self._sample_topp(lprobs) elif self.sampling_topk > 0: # only sample from top-k candidates lprobs, top_indices = lprobs.topk(self.sampling_topk) probs = lprobs.exp_() else: probs = lprobs.exp_() # dummy data to be consistent with true branch for type check top_indices = torch.empty(0).to(probs) # sample if step == 0: indices_buf = torch.multinomial( probs.view(bsz, -1), beam_size, replacement=True, ).view(bsz, beam_size) else: indices_buf = torch.multinomial( probs.view(bsz * beam_size, -1), 1, replacement=True, ).view(bsz, beam_size) if step == 0: # expand to beam size probs = probs.expand(bsz, beam_size, -1) # gather scores scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1)) scores_buf = scores_buf.log_().view(bsz, -1) # remap indices if using top-k or top-P sampling if self.sampling_topk > 0 or self.sampling_topp > 0: indices_buf = torch.gather( top_indices.expand(bsz, beam_size, -1), dim=2, index=indices_buf.unsqueeze(-1), ).squeeze(2) if step == 0: beams_buf = indices_buf.new_zeros(bsz, beam_size) else: beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1) # make scores cumulative scores_buf.add_( torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf) ) return scores_buf, indices_buf, beams_buf class DiverseSiblingsSearch(Search): """ Beam search with diverse siblings. See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details. https://arxiv.org/abs/1611.08562 1/ Calculate hypotheses for each beam 2/ Intra-sibling ordering 3/ Rewrite scores 4/ Choose top K hypotheses if diversity_rate == 0 is equivalent to BeamSearch """ def __init__(self, tgt_dict, diversity_rate): super().__init__(tgt_dict) self.diversity_rate = diversity_rate self.beam = BeamSearch(tgt_dict) def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() k = min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. 
beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ) s_list: List[Tensor] i_list: List[Tensor] s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)] i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)] sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate if step == 0: return self.beam.step(step, lprobs, scores) lprobs.add_(scores[:, :, step - 1].unsqueeze(-1)) # 1/ Calculate hypotheses for each beam for i in range(beam_size): torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i])) i_list[i].fmod_(vocab_size) # 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores s_list[i].sub_(sibling_score) # 4/ Choose top K hypotheses indices = torch.stack(i_list, dim=1).view(bsz, -1) final_scores = torch.empty(0).to(lprobs) final_indices = torch.LongTensor().to(device=lprobs.device) final_beams = torch.LongTensor().to(device=lprobs.device) (final_scores, final_indices) = torch.topk( torch.stack(s_list, dim=1).view(bsz, -1), k, ) final_beams = final_indices // k for i in range(bsz): final_indices[i] = indices[i][final_indices[i]] return final_scores, final_indices, final_beams
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/search.py
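Two small sketches of driving the search classes above directly, outside of SequenceGenerator: one BeamSearch step on random log-probabilities, and nucleus filtering via the internal _sample_topp() helper. The toy dictionary, tensor shapes, and p=0.9 are made up for illustration, and both snippets assume this repository's fairseq package is importable.

# Hedged sketch: standalone use of BeamSearch.step() and Sampling._sample_topp().
import torch
from fairseq.data import Dictionary
from fairseq.search import BeamSearch, Sampling

# Toy target dictionary: the default special symbols plus four made-up words.
tgt_dict = Dictionary()
for w in ["hello", "world", "foo", "bar"]:
    tgt_dict.add_symbol(w)

bsz, beam_size, vocab_size = 1, 2, len(tgt_dict)
lprobs = torch.randn(bsz, beam_size, vocab_size).log_softmax(dim=-1)

# 1) One BeamSearch step at step=0: only the first beam entry per sentence is used,
#    and 2 * beam_size candidates come back (scores, vocab indices, source beam ids).
beam = BeamSearch(tgt_dict)
scores, indices, beams = beam.step(0, lprobs, None)
print(scores.shape, indices.shape, beams.shape)   # each is (bsz, 2 * beam_size)

# 2) Nucleus filtering: keep the smallest sorted prefix of the vocabulary whose
#    cumulative probability exceeds p. _sample_topp() is an internal helper.
sampler = Sampling(tgt_dict, sampling_topp=0.9)
probs, kept = sampler._sample_topp(lprobs.clone())        # clone() because exp_() is in-place
assert (probs.sum(dim=-1) >= 0.9 - 1e-4).all()            # kept mass reaches at least ~p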
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import shutil from typing import List, Optional logger = logging.getLogger(__file__) try: from fvcore.common.file_io import PathManager as FVCorePathManager try: # [FB only - for now] AWS PathHandler for PathManager from .fb_pathhandlers import S3PathHandler FVCorePathManager.register_handler(S3PathHandler()) except KeyError: logging.warning("S3PathHandler already registered.") except ImportError: logging.debug( "S3PathHandler couldn't be imported. Either missing fb-only files, or boto3 module." ) except ImportError: FVCorePathManager = None class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from fvcore's PathManager abstraction (for transparently handling various internal backends). """ @staticmethod def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if FVCorePathManager: return FVCorePathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) @staticmethod def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if FVCorePathManager: return FVCorePathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) @staticmethod def get_local_path(path: str, **kwargs) -> str: if FVCorePathManager: return FVCorePathManager.get_local_path(path, **kwargs) return path @staticmethod def exists(path: str) -> bool: if FVCorePathManager: return FVCorePathManager.exists(path) return os.path.exists(path) @staticmethod def isfile(path: str) -> bool: if FVCorePathManager: return FVCorePathManager.isfile(path) return os.path.isfile(path) @staticmethod def ls(path: str) -> List[str]: if FVCorePathManager: return FVCorePathManager.ls(path) return os.listdir(path) @staticmethod def mkdirs(path: str) -> None: if FVCorePathManager: return FVCorePathManager.mkdirs(path) os.makedirs(path, exist_ok=True) @staticmethod def rm(path: str) -> None: if FVCorePathManager: return FVCorePathManager.rm(path) os.remove(path) @staticmethod def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) @staticmethod def register_handler(handler) -> None: if FVCorePathManager: return FVCorePathManager.register_handler(handler=handler) @staticmethod def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if FVCorePathManager: return FVCorePathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) @staticmethod def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if FVCorePathManager: for p in FVCorePathManager._path_handlers.keys(): if path.startswith(p): return True return False @staticmethod def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) @staticmethod def rename(src: str, dst: str): os.rename(src, dst)
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/file_io.py
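# --- Illustrative sketch, not part of the dataset record above ---
# Minimal usage of the PathManager wrapper defined above: when fvcore is not
# installed it falls back to Python builtins, so plain local paths always work.
# The directory and file names are hypothetical.
from fairseq.file_io import PathManager

PathManager.mkdirs("/tmp/fairseq_demo")
with PathManager.open("/tmp/fairseq_demo/example.txt", "w") as f:
    f.write("hello\n")
assert PathManager.exists("/tmp/fairseq_demo/example.txt")
print(PathManager.ls("/tmp/fairseq_demo"))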
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a network across multiple GPUs. """ import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, distributed_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler logger = logging.getLogger(__name__) class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): if isinstance(cfg, Namespace): logger.warning( "argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf" ) cfg = convert_namespace_to_omegaconf(cfg) self.cfg = cfg self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = cfg.common.tpu self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu if self.cuda: self.device = torch.device("cuda") elif self.tpu: self.device = utils.get_tpu_device() else: self.device = torch.device("cpu") # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if cfg.common.fp16: self._criterion = self._criterion.half() self._model = self._model.half() elif cfg.common.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) if not cfg.distributed_training.pipeline_model_parallel: self._criterion = self._criterion.to(device=self.device) self._model = self._model.to(device=self.device) self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel self.last_device = None if self.cuda and self.pipeline_model_parallel: self.last_device = torch.device( cfg.distributed_training.pipeline_devices[-1] ) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( "detected shared parameter: {} <- {}".format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = None # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = 
distributed_utils.all_gather_list( self.cuda_env, group=distributed_utils.get_global_group() ) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None @property def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() @property def data_parallel_process_group(self): return distributed_utils.get_data_parallel_group() @property def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() @property def is_data_parallel_master(self): # NOTE: this returns true for all model parallel replicas with data # parallel rank 0 return self.data_parallel_rank == 0 @property def criterion(self): if self._wrapped_criterion is None: if ( utils.has_parameters(self._criterion) and self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf ): self._wrapped_criterion = models.DistributedFairseqModel( self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion @property def model(self): if self._wrapped_model is None: if self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf: self._wrapped_model = models.DistributedFairseqModel( self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, ) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer @property def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) if self.cfg.common.fp16 or self.cfg.common.bf16: if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16, " "please switch to FP32 which is likely to be faster" ) if ( self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16 ): self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params ) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: logger.info("NOTE: your device may support faster training with --fp16") self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) if self.cfg.optimization.use_bmuf: self._optimizer = optim.FairseqBMUF( self.cfg.bmuf, self._optimizer, ) if self.cfg.distributed_training.zero_sharding == "os": if ( self.cfg.common.fp16 and not self.cfg.common.memory_efficient_fp16 and not self.cfg.common.memory_efficient_bf16 ) and not self.cfg.common.fp16_no_flatten_grads: raise ValueError( "ZeRO is incomptabile with fp16 and flattened grads. 
" "Please use --fp16-no-flatten-grads" ) else: optim.shard_(self._optimizer, self.data_parallel_process_group) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. self._lr_scheduler = lr_scheduler.build_lr_scheduler( self.cfg.lr_scheduler, self.optimizer, ) self._lr_scheduler.step_update(0) def consolidate_optimizer(self): """For OSS, we need to consolidate the state dict.""" if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): self.optimizer.optimizer.consolidate_state_dict() def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" if self.is_data_parallel_master: # only save one checkpoint extra_state["metrics"] = metrics.state_dict() extra_state["previous_training_time"] = self.cumulative_training_time() checkpoint_utils.save_state( filename, self.cfg, self.get_model().state_dict(), self.get_criterion(), self.optimizer, self.lr_scheduler, self.get_num_updates(), self._optim_history, extra_state, ) logger.info(f"Finished saving checkpoint to {filename}") def load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False, ): """ Load all training state from a checkpoint file. rank = 0 will load the checkpoint, and then broadcast it to all other ranks. """ extra_state, self._optim_history, last_optim_state = None, [], None logger.info(f"Preparing to load checkpoint {filename}") is_distributed = self.data_parallel_world_size > 1 bexists = PathManager.isfile(filename) if bexists: load_on_all_ranks = ( self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks # TPUs don't support broadcast yet, so load checkpoints # on every worker for now or self.tpu ) if load_on_all_ranks or self.data_parallel_rank == 0: state = checkpoint_utils.load_checkpoint_to_cpu( filename, load_on_all_ranks=load_on_all_ranks ) last_optim_state = state.get("last_optimizer_state", None) # If doing zero_sharding, do not broadcast global optimizer # state. Later we will broadcast sharded states to each rank # to avoid memory from exploding. if ( not load_on_all_ranks and self.cfg.distributed_training.zero_sharding == "os" and "last_optimizer_state" in state and is_distributed ): state["last_optimizer_state"] = "SHARDED" else: last_optim_state = None state = None if is_distributed and not load_on_all_ranks: state = distributed_utils.broadcast_object( state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device, ) if self.data_parallel_rank > 0: last_optim_state = state.get("last_optimizer_state", None) # load model parameters try: self.get_model().load_state_dict( state["model"], strict=True, model_cfg=self.cfg.model ) if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), "Criterion does not match; please reset the optimizer (--reset-optimizer)." 
assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), "Optimizer does not match; please reset the optimizer (--reset-optimizer)." if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) if not load_on_all_ranks and is_distributed: last_optim_state = self.optimizer.broadcast_global_state_dict( last_optim_state ) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: epoch = extra_state["train_iterator"]["epoch"] if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() logger.info( "Loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) else: logger.info("No existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False, ): """Return an EpochBatchIterator over the training set for a given epoch.""" if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, ) batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def get_valid_iterator( self, subset, disable_iterator_cache=False, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) self.lr_step_begin_epoch(epoch) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task 
specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("begin_epoch") # wait for all workers xm.mark_step() def begin_valid_epoch(self, epoch): """Called at the beginning of each validation epoch.""" # task specific setup per validation epoch self.task.begin_valid_epoch(epoch, self.get_model()) def reset_dummy_batch(self, batch): self._dummy_batch = batch @metrics.aggregate("train") def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 for i, sample in enumerate(samples): # delayed update loop sample, is_dummy_batch = self._prepare_sample(sample) def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if raise_oom: raise e logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.cfg.distributed_training.distributed_world_size == 1: return None else: raise e if self.tpu and i < len(samples) - 1: # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. # To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass import torch_xla.core.xla_model as xm xm.mark_step() if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas if self._sync_stats(): train_time = self._local_cumulative_training_time() logging_outputs, ( sample_size, ooms, total_train_time, ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch, ) self._cumulative_training_time = ( total_train_time / self.data_parallel_world_size ) overflow = False try: with torch.autograd.profiler.record_function("reduce-grads"): # reduce gradients across workers self.optimizer.all_reduce_grads(self.model) if utils.has_parameters(self.criterion): self.optimizer.all_reduce_grads(self.criterion) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (data_parallel_size / sample_size) since # DDP normalizes by the number of data parallel workers for # improved fp16 precision. # Thus we get (sum_of_gradients / sample_size) at the end. 
# In case of fp16, this step also undoes loss scaling. # (Debugging note: Some optimizers perform this scaling on the # fly, so inspecting model.parameters() or optimizer.params may # still show the original, unscaled gradients.) numer = ( self.data_parallel_world_size if not self.cfg.optimization.use_bmuf or self._sync_stats() else 1 ) self.optimizer.multiply_grads(numer / (sample_size or 1.0)) # Note: (sample_size or 1.0) handles the case of a zero gradient, in a # way that avoids CPU/device transfers in case sample_size is a GPU or # TPU object. The assumption is that the gradient itself is also 0. with torch.autograd.profiler.record_function("clip-grads"): # clip grads grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) # check that grad norms are consistent across workers # on tpu check tensor is slow if not self.tpu: if ( not self.cfg.optimization.use_bmuf and self.cfg.distributed_training.distributed_wrapper != "SlowMo" ): self._check_grad_norms(grad_norm) if not torch.isfinite(grad_norm).all(): # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.task.optimizer_step( self.optimizer, model=self.model, update_num=self.get_num_updates() ) except FloatingPointError: # re-run the forward and backward pass with hooks attached to print # out where it fails self.zero_grad() with NanDetector(self.get_model()): for _, sample in enumerate(samples): sample, _ = self._prepare_sample(sample) self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, ) raise except OverflowError as e: overflow = True logger.info(f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}") grad_norm = torch.tensor(0.0).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer after the step if hasattr(self.model, "perform_additional_optimizer_actions"): if hasattr(self.optimizer, "fp32_params"): self.model.perform_additional_optimizer_actions( self.optimizer.optimizer, self.optimizer.fp32_params ) else: self.model.perform_additional_optimizer_actions( self.optimizer.optimizer ) logging_output = None if ( not overflow or self.cfg.distributed_training.distributed_wrapper == "SlowMo" ): self.set_num_updates(self.get_num_updates() + 1) if self.tpu: # mark step on TPUs import torch_xla.core.xla_model as xm xm.mark_step() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.cfg.common.log_interval == 0: # log memory usage mem_info = xm.get_memory_info(self.device) gb_free = mem_info["kb_free"] / 1024 / 1024 gb_total = mem_info["kb_total"] / 1024 / 1024 metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0, ) metrics.log_scalar( "gb_total", gb_total, priority=1600, round=1, weight=0, ) logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm, ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm, ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and 
self.cfg.common.empty_cache_freq > 0 and ( (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) % self.cfg.common.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.cfg.common.fp16: metrics.log_scalar( "loss_scale", self.optimizer.scaler.loss_scale, priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output @metrics.aggregate("valid") def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("valid_step") # wait for all workers xm.mark_step() with torch.no_grad(): self.model.eval() self.criterion.eval() sample, is_dummy_batch = self._prepare_sample(sample) try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( "ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size,) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_output def zero_grad(self): self.optimizer.zero_grad() def lr_step_begin_epoch(self, epoch): """Adjust the learning rate at the beginning of the epoch.""" self.lr_scheduler.step_begin_epoch(epoch) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) if isinstance(new_lr, dict): for k, v in new_lr.items(): metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) new_lr = new_lr.get("default", next(iter(new_lr.values()))) else: metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if "get_meter" not in self._warn_once: self._warn_once.add("get_meter") utils.deprecation_warning( "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_") :] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=None) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _prepare_sample(self, sample, is_dummy=False): if sample == "DUMMY": raise Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: assert ( self._dummy_batch is not None and len(self._dummy_batch) > 0 ), "Invalid dummy batch: {}".format(self._dummy_batch) sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) return sample, True if self.cuda: if self.pipeline_model_parallel: if "target" in sample: sample["target"] = utils.move_to_cuda( sample["target"], device=self.last_device ) else: sample = utils.move_to_cuda(sample) elif self.tpu and is_dummy: # the dummy batch may not be on the appropriate device sample = utils.move_to_cuda(sample, device=self.device) def apply_half(t): if t.dtype is torch.float32: return t.half() return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.cfg.common.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.cfg.common.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) if self._dummy_batch == "DUMMY": self._dummy_batch = sample return sample, False def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.cfg.common.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. 
if self.data_parallel_world_size == 1: return False elif self.cfg.optimization.use_bmuf: return ( self.get_num_updates() + 1 ) % self.cfg.bmuf.global_sync_iter == 0 and ( self.get_num_updates() + 1 ) > self.cfg.bmuf.warmup_iterations else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list( zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), group=self.data_parallel_process_group, ) ) ) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data["extra_stats_" + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data["logging_outputs_" + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( torch.isfinite(tensor).all() or (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all() ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format( pretty_detail ) # use FloatingPointError to trigger NanDetector raise FloatingPointError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=no_c10d. " "Or are you mixing up different generation of GPUs in training?" + "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None and ( not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) ): metrics.log_speed("ups", 1.0, priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.cfg.optimization.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.cfg.optimization.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: logger.warning( "XLA compilation detected on device #{}; too many of these can lead " "to slow training, but we expect a few in the beginning".format( self.cfg.distributed_training.distributed_rank ) ) self._num_xla_compiles = num_xla_compiles def _catalog_shared_params(module, memo=None, prefix=""): 
if memo is None: first_call = True memo = {} else: first_call = False for name, param in module._parameters.items(): param_prefix = prefix + ("." if prefix else "") + name if param not in memo: memo[param] = [] memo[param].append(param_prefix) for name, m in module._modules.items(): if m is None: continue submodule_prefix = prefix + ("." if prefix else "") + name _catalog_shared_params(m, memo, submodule_prefix) if first_call: return [x for x in memo.values() if len(x) > 1] def _get_module_by_path(module, path): path = path.split(".") for name in path: module = getattr(module, name) return module def _set_module_by_path(module, path, value): path = path.split(".") for name in path[:-1]: module = getattr(module, name) setattr(module, path[-1], value)
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/trainer.py
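# --- Illustrative sketch, not part of the dataset record above ---
# Stand-alone illustration of the gradient scaling done in Trainer.train_step:
# DDP averages gradients over workers, so they are multiplied by
# (data_parallel_world_size / sample_size) to recover
# sum_of_gradients / sample_size. The values below are made up; the real code
# does this through optimizer.multiply_grads().
import torch

data_parallel_world_size = 4
sample_size = 128.0                      # e.g. number of target tokens in the update

param = torch.nn.Parameter(torch.randn(3))
param.grad = torch.randn(3)              # pretend DDP has already averaged this

multiplier = data_parallel_world_size / (sample_size or 1.0)
param.grad.mul_(multiplier)              # same effect as multiply_grads(numer / sample_size)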
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from collections import Counter import torch from fairseq.file_io import PathManager from fairseq.tokenizer import tokenize_line def safe_readline(f): pos = f.tell() while True: try: return f.readline() except UnicodeDecodeError: pos -= 1 f.seek(pos) # search where this character begins class Binarizer: @staticmethod def binarize( filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=-1, already_numberized=False, ): nseq, ntok = 0, 0 replaced = Counter() def replaced_consumer(word, idx): if idx == dict.unk_index and word != dict.unk_word: replaced.update([word]) with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f: f.seek(offset) # next(f) breaks f.tell(), hence readline() must be used line = safe_readline(f) while line: # f.tell() does not always give the byte position in the file # sometimes it skips to a very large number # it is unlikely that through a normal read we go from # end bytes to end + 2**32 bytes (4 GB) and this makes it unlikely # that the procedure breaks by the undeterministic behavior of # f.tell() if end > 0 and f.tell() > end and f.tell() < end + 2**32: break if already_numberized: id_strings = line.strip().split() id_list = [int(id_string) for id_string in id_strings] if reverse_order: id_list.reverse() if append_eos: id_list.append(dict.eos()) ids = torch.IntTensor(id_list) else: ids = dict.encode_line( line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order, ) nseq += 1 ntok += len(ids) consumer(ids) line = f.readline() return { "nseq": nseq, "nunk": sum(replaced.values()), "ntok": ntok, "replaced": replaced, } @staticmethod def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1): nseq = 0 with open(PathManager.get_local_path(filename), "r") as f: f.seek(offset) line = safe_readline(f) while line: if end > 0 and f.tell() > end: break ids = alignment_parser(line) nseq += 1 consumer(ids) line = f.readline() return {"nseq": nseq} @staticmethod def find_offsets(filename, num_chunks): with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f: size = os.fstat(f.fileno()).st_size chunk_size = size // num_chunks offsets = [0 for _ in range(num_chunks + 1)] for i in range(1, num_chunks): f.seek(chunk_size * i) safe_readline(f) offsets[i] = f.tell() return offsets
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/binarizer.py
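# --- Illustrative sketch, not part of the dataset record above ---
# How Binarizer.find_offsets and Binarizer.binarize are typically combined to
# process one chunk of a text corpus. The corpus file name is hypothetical and
# the empty Dictionary() means unknown words are mapped to <unk>.
from fairseq.binarizer import Binarizer
from fairseq.data import Dictionary

vocab = Dictionary()
filename = "train.txt"               # hypothetical whitespace-tokenised corpus

offsets = Binarizer.find_offsets(filename, num_chunks=4)

ids_buffer = []
stats = Binarizer.binarize(
    filename,
    vocab,
    consumer=ids_buffer.append,      # collect one IntTensor per line
    offset=offsets[0],
    end=offsets[1],                  # binarize only the first chunk
)
print(stats["nseq"], stats["ntok"], stats["nunk"])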
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Implements tracking of constraints for a beam item. A list of constraints is given as a list of one or more token sequences, each of length at least one token. For example, for an input sentence > Die maschinelle Übersetzung ist schwer zu kontrollieren. We could have the constraints: * to influence * hard There are two implementations: * OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints. * UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints. The difference is that in the first, the constraints are assumed to be in order; the algorithm will permit zero or more tokens between them. In the second, the constraints are not ordered, so many orderings will be explored. The same sequence can be present any number of times, and will appear that many times in the output. """ from collections import Counter from typing import List, Optional, Set, Tuple import torch class ConstraintState: def __init__(self): pass def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor: """Takes a list of list of constraints in tensor form (a list of tensor constraints for each sentence) and transforms it into a packed Tensor. For example, here is a batch of size 3 with 3, 0, and 1 constraints: [ [ [3 1 2], [3], [4 5 6 7], ] [], [ [1 8 9 10 1 4 11 12], ] ] Its corresponding packed structure is: [ [ 3 3 1 2 0 3 0 4 5 6 7 0], [ 0 0 0 0 0 0 0 0 0 0 0 0], [ 1 1 8 9 10 1 4 11 12 0 0 0] ] The packed tensor has shape (batch size, maxlen), where maxlen is defined below. Each row contains concatenated constraint tokens for that sentence, with 0 appended after each constraint. The first item in each row is the number of constraints for that sentence. So maxlen is the maximum of (number of constraints) + (sum length of constraints) + 1. across all sentences in the batch. """ # The maximum word length of concatenated constraints for any sentence max_constraints_len = 1 for sentence_constraints in batch_constraints: if len(sentence_constraints): # number of constraints, plus sum of constrain lens, plus a zero after each constraints_len = ( 1 + sum([c.size(0) for c in sentence_constraints]) + len(sentence_constraints) ) max_constraints_len = max(max_constraints_len, constraints_len) batch_size = len(batch_constraints) constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long() for i, sentence_constraints in enumerate(batch_constraints): constraints_tensor[i, 0] = len(sentence_constraints) offset = 1 for j, constraint in enumerate(sentence_constraints): this_len = constraint.size(0) constraints_tensor[i, offset : offset + this_len] = constraint offset += this_len + 1 return constraints_tensor.long() def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]: """ Transforms *one row* of a packed constraint tensor (e.g., for one sentence in the batch) into a list of constraint tensors. """ constraint_list = [] num_constraints = constraint_tensor[0] constraints = constraint_tensor.tolist() offset = 1 for i in range(num_constraints): where = constraints.index(0, offset) constraint_list.append(constraint_tensor[offset:where]) offset = where + 1 return constraint_list class ConstraintNode: """ Represents a node in a trie managing unordered constraints. 
""" def __init__(self, token: int = None, parent=None): # The token associate with this node (None for the root) self.token = int(token) if token is not None else None # The parent (None at the root) self.parent = parent # Whether this node is a completed constraint self.terminal = 0 # List of child nodes self.children = {} # The cumulative number of constraints from this point in the # trie forward self.num_constraints = 0 @property def id(self): return self.token def __str__(self): term = self.terminal != 0 return f"[{self.token}].{term}#{self.num_constraints}" def __getitem__(self, key: int): return self.children.get(key, None) def next_tokens(self) -> Set[int]: """The set of child labels.""" return set(self.children.keys()) @staticmethod def create(constraints: List[List[int]]): root = ConstraintNode() for sequence in constraints: root.add_sequence(sequence) return root @staticmethod def print_graph(node: "ConstraintNode"): if len(node.children) == 0: return str(node) else: s = f"({node}" for child in node.children.values(): s += " " + ConstraintNode.print_graph(child) s += ")" return s def token_counts(self) -> Counter: """Returns a counter of the number of times each token is used in a constraint. """ token_counts = Counter() kids = list(self.children.values()) while len(kids) > 0: kid = kids.pop() token_counts[kid.id] += kid.num_constraints kids += list(kid.children.values()) return token_counts def tokens(self) -> Set[int]: """Returns the set of tokens in constraints.""" return set(self.token_counts().keys()) def add_sequence(self, sequence: List[int]): """Adds a constraint, represented as a list of integers, to the trie.""" assert len(sequence) > 0 token = int(sequence[0]) if token not in self.children: self.children[token] = ConstraintNode(token, parent=self) node = self.children[token] if len(sequence) == 1: node.terminal += 1 node.num_constraints += 1 parent = node.parent while parent is not None: parent.num_constraints += 1 parent = parent.parent else: node.add_sequence(sequence[1:]) class UnorderedConstraintState(ConstraintState): """ Records progress through the set of constraints for each item in the beam using a trie. """ def __init__(self, node: ConstraintNode, copy_from: "ConstraintState" = None): self.node = node if copy_from is None: # The root node self.root = node # The set of states in the graph that have been completed self.completed = Counter() # The... 
self.generated = Counter() # The list of tokens we need to generate self.needed_tokens = self.root.tokens() else: self.completed = Counter(copy_from.completed) self.generated = Counter(copy_from.generated) self.root = copy_from.root # Mark the node as generated if self.node != self.root: self.generated[node] += 1 @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) constraint_trie_root = ConstraintNode.create(constraint_list) return UnorderedConstraintState(constraint_trie_root) def __str__(self): gen_str = ",".join([str(node) for node in self.generated]) return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}" def __copy__(self): copied_state = UnorderedConstraintState(self.node, copy_from=self) return copied_state def copy(self): return self.__copy__() @property def name(self): if self.node.id is None: return "ROOT" else: return str(self.node.id) @property def is_root(self): return self.node == self.root @property def bank(self): return sum(self.generated.values()) @property def num_completed(self): """The number of constraints (not constraint tokens) that are completed. In addition to the already-completed states, we need to account for the current state, which might get marked as completed when another token is generated. """ in_final = self.node.terminal and self.completed[self.node] < self.node.terminal return sum(self.completed.values()) + in_final @property def finished(self): return self.root.num_constraints - self.num_completed == 0 @property def token_counts(self): return self.root.token_counts() @property def tokens(self): return self.root.tokens() @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" if self.node != self.root: return self.root.next_tokens().union(self.node.next_tokens()) else: return self.root.next_tokens() def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. We: - check whether any constraints were met (all prefixes of current state) - if one is found, mark it as completed - adjust visited nodes accordingly """ token = int(token) next_state = None child = self.node[token] if child is not None and self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) def rewind(): """If we're mid-trie and an "illegal" token is chosen next, we need to reset our state to the root state. However, along the way, we need to check whether a prefix of the current trie state represents a state we could mark as completed. 
""" node = self.node while node != self.root: if node.terminal and self.completed[node] < node.terminal: next_state.completed[node] += 1 return next_state.generated[node] -= 1 node = node.parent # Fall off the graph, check the root if next_state is None and token in self.root.next_tokens(): child = self.root[token] # We can only traverse this edge if it's not saturated if self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) else: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() elif next_state is None: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() return next_state class ConstraintSequence: def __init__(self, sequences: List[List[int]]): """Represents a set of possibly multitoken constraints by concatenating them and internally recording the end points. """ self.sequences = [] self.endpoints = [] self.num_tokens = 0 self.tokens = set() for sequence in sequences: for token in sequence: self.tokens.add(token) self.num_tokens += len(sequence) self.endpoints += [False for x in range(len(sequence) - 1)] + [True] self.sequences += sequence def __getitem__(self, key: int): return self.sequences[key] def __len__(self): return len(self.sequences) def __str__(self): return str(self.sequences) class OrderedConstraintState(ConstraintState): """ Records progress through the set of linear nonbranching constraints with gaps. """ def __init__(self, sequence: ConstraintSequence, state: int = -1): self.sequence = sequence self.state = state @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) return OrderedConstraintState(ConstraintSequence(constraint_list), -1) def __str__(self): return f"{self.state}/{self.bank}x{self.num_completed}" def __copy__(self): return OrderedConstraintState(self.sequence, self.state) def copy(self): return self.__copy__() @property def num_completed(self): if self.state == -1: return 0 count = len( list(filter(lambda x: x, self.sequence.endpoints[0 : self.state + 1])) ) return count @property def is_root(self): return self.state == -1 @property def name(self): if self.state == -1: return "ROOT" else: return str(self.sequence[self.state]) @property def bank(self) -> int: return self.state + 1 @property def finished(self): return self.state + 1 == len(self.sequence) @property def token_counts(self): return self.sequence.token_counts() @property def tokens(self): return self.sequence.tokens @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" tokens = set() if self.state > 0: tokens.add(self.sequence[0]) if not self.finished: tokens.add(self.sequence[self.state + 1]) return tokens def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. 
We: - check whether any constraints were met (all prefixes of current state) - if one is found, mark it as completed - adjust visited nodes accordingly """ token = int(token) # print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="") if self.finished: # Accept anything next_state = self.copy() elif self.sequence[self.state + 1] == token: # Advance to the next token next_state = OrderedConstraintState(self.sequence, self.state + 1) elif self.sequence.endpoints[self.state]: # Accept anything between constraints (*) next_state = self.copy() elif token == self.sequence[0]: # Start over having generated the first token next_state = OrderedConstraintState(self.sequence, 0) else: # Start over from the root next_state = OrderedConstraintState(self.sequence, -1) return next_state
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/token_generation_constraints.py
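# --- Illustrative sketch, not part of the dataset record above ---
# Round-trips the packing format documented in pack_constraints: each row
# starts with the number of constraints, followed by the constraint tokens
# with a 0 terminator after each one. The batch reuses the docstring example.
import torch
from fairseq.token_generation_constraints import (
    UnorderedConstraintState,
    pack_constraints,
    unpack_constraints,
)

batch_constraints = [
    [torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
    [],
    [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
]
packed = pack_constraints(batch_constraints)        # shape: (3, 12)
assert unpack_constraints(packed[0])[1].tolist() == [3]

state = UnorderedConstraintState.create(packed[0])
state = state.advance(3)                            # consume one constraint token
print(state.bank, state.num_completed)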
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("dummy_masked_lm") class DummyMaskedLMTask(LegacyFairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("--dict-size", default=49995, type=int) parser.add_argument("--dataset-size", default=100000, type=int) parser.add_argument( "--tokens-per-sample", default=512, type=int, help="max number of total tokens over all segments " "per sample for BERT dataset", ) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary # add mask token self.mask_idx = dictionary.add_symbol("<mask>") dictionary.pad_to_multiple_(8) # often faster if divisible by 8 mask_idx = 0 pad_idx = 1 seq = torch.arange(args.tokens_per_sample) + pad_idx + 1 mask = torch.arange(2, args.tokens_per_sample, 7) # ~15% src = seq.clone() src[mask] = mask_idx tgt = torch.full_like(seq, pad_idx) tgt[mask] = seq[mask] self.dummy_src = src self.dummy_tgt = tgt @classmethod def setup_task(cls, args, **kwargs): """Setup the task. """ dictionary = Dictionary() for i in range(args.dict_size): dictionary.add_symbol("word{}".format(i)) logger.info("dictionary: {} types".format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.args.batch_size is not None: bsz = self.args.batch_size else: bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.args.tokens_per_sample, dtype=torch.long ), }, "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), "nsentences": bsz, "ntokens": bsz * self.args.tokens_per_sample, }, num_items=self.args.dataset_size, item_size=self.args.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/benchmark/dummy_masked_lm.py
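# --- Illustrative sketch, not part of the dataset record above ---
# Reproduces, outside the task class, the dummy masked-LM sample built in
# DummyMaskedLMTask.__init__ above: roughly every 7th position is masked and
# only masked positions carry a target token. tokens_per_sample is made up.
import torch

tokens_per_sample = 16
mask_idx, pad_idx = 0, 1

seq = torch.arange(tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, tokens_per_sample, 7)   # ~15% of positions

src = seq.clone()
src[mask] = mask_idx
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]
print(src.tolist())
print(tgt.tolist())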
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.dataclass import FairseqDataclass from fairseq.tasks import FairseqTask, register_task from omegaconf import II logger = logging.getLogger(__name__) @dataclass class DummyLMConfig(FairseqDataclass): dict_size: int = 49996 dataset_size: int = 100000 tokens_per_sample: int = field( default=512, metadata={"help": "max sequence length"} ) add_bos_token: bool = False batch_size: Optional[int] = II("dataset.batch_size") max_tokens: Optional[int] = II("dataset.max_tokens") max_target_positions: int = II("task.tokens_per_sample") @register_task("dummy_lm", dataclass=DummyLMConfig) class DummyLMTask(FairseqTask): def __init__(self, cfg: DummyLMConfig): super().__init__(cfg) # load dictionary self.dictionary = Dictionary() for i in range(cfg.dict_size): self.dictionary.add_symbol("word{}".format(i)) self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8 logger.info("dictionary: {} types".format(len(self.dictionary))) seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1 self.dummy_src = seq[:-1] self.dummy_tgt = seq[1:] def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.cfg.batch_size is not None: bsz = self.cfg.batch_size else: bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.cfg.tokens_per_sample, dtype=torch.long ), }, "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), "nsentences": bsz, "ntokens": bsz * self.cfg.tokens_per_sample, }, num_items=self.cfg.dataset_size, item_size=self.cfg.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/benchmark/dummy_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("dummy_mt") class DummyMTTask(LegacyFairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("--dict-size", default=49996, type=int) parser.add_argument("--dataset-size", default=100000, type=int) parser.add_argument("--src-len", default=30, type=int) parser.add_argument("--tgt-len", default=30, type=int) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed dictionary.pad_to_multiple_(8) # often faster if divisible by 8 self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1 self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1 @classmethod def setup_task(cls, args, **kwargs): """Setup the task. """ dictionary = Dictionary() for i in range(args.dict_size): dictionary.add_symbol("word{}".format(i)) logger.info("dictionary: {} types".format(len(dictionary))) args.max_source_positions = args.src_len + dictionary.pad() + 2 args.max_target_positions = args.tgt_len + dictionary.pad() + 2 return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ item_size = max(self.args.src_len, self.args.tgt_len) if self.args.batch_size is not None: bsz = self.args.batch_size else: bsz = max(1, self.args.max_tokens // item_size) tgt = torch.stack([self.dummy_tgt for _ in range(bsz)]) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.args.src_len, dtype=torch.long ), "prev_output_tokens": tgt.clone(), }, "target": tgt, "nsentences": bsz, "ntokens": bsz * self.args.tgt_len, }, num_items=self.args.dataset_size, item_size=item_size, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
EXA-1-master
exa/models/unilm-master/decoding/IAD/fairseq/fairseq/benchmark/dummy_mt.py