from .discriminative_reranking_model import DiscriminativeNMTReranker


__all__ = [
    "DiscriminativeNMTReranker",
]
EXA-1-master
exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/models/__init__.py
from dataclasses import dataclass, field import os import torch import torch.nn as nn from fairseq import utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( BaseFairseqModel, register_model, ) from fairseq.models.roberta.model import RobertaClassificationHead from fairseq.modules import ( LayerNorm, TransformerSentenceEncoder, TransformerSentenceEncoderLayer, ) ACTIVATION_FN_CHOICES = ChoiceEnum(utils.get_available_activation_fns()) JOINT_CLASSIFICATION_CHOICES = ChoiceEnum(["none", "sent"]) SENTENCE_REP_CHOICES = ChoiceEnum(["head", "meanpool", "maxpool"]) def update_init_roberta_model_state(state): """ update the state_dict of a Roberta model for initializing weights of the BertRanker """ for k in list(state.keys()): if ".lm_head." in k or "version" in k: del state[k] continue # remove 'encoder/decoder.sentence_encoder.' from the key assert k.startswith("encoder.sentence_encoder.") or k.startswith( "decoder.sentence_encoder." ), f"Cannot recognize parameter name {k}" if "layernorm_embedding" in k: new_k = k.replace(".layernorm_embedding.", ".emb_layer_norm.") state[new_k[25:]] = state[k] else: state[k[25:]] = state[k] del state[k] class BaseRanker(nn.Module): def __init__(self, args, task): super().__init__() self.separator_token = task.dictionary.eos() self.padding_idx = task.dictionary.pad() def forward(self, src_tokens): raise NotImplementedError def get_segment_labels(self, src_tokens): segment_boundary = (src_tokens == self.separator_token).long() segment_labels = ( segment_boundary.cumsum(dim=1) - segment_boundary - (src_tokens == self.padding_idx).long() ) return segment_labels def get_positions(self, src_tokens, segment_labels): segment_positions = ( torch.arange(src_tokens.shape[1]) .to(src_tokens.device) .repeat(src_tokens.shape[0], 1) ) segment_boundary = (src_tokens == self.separator_token).long() _, col_idx = (segment_positions * segment_boundary).nonzero(as_tuple=True) col_idx = torch.cat([torch.zeros(1).type_as(col_idx), col_idx]) offset = torch.cat( [ torch.zeros(1).type_as(segment_boundary), segment_boundary.sum(dim=1).cumsum(dim=0)[:-1], ] ) segment_positions -= col_idx[segment_labels + offset.unsqueeze(1)] * ( segment_labels != 0 ) padding_mask = src_tokens.ne(self.padding_idx) segment_positions = (segment_positions + 1) * padding_mask.type_as( segment_positions ) + self.padding_idx return segment_positions class BertRanker(BaseRanker): def __init__(self, args, task): super(BertRanker, self).__init__(args, task) init_model = getattr(args, "pretrained_model", "") self.joint_layers = nn.ModuleList() if os.path.isfile(init_model): print(f"initialize weight from {init_model}") from fairseq import hub_utils x = hub_utils.from_pretrained( os.path.dirname(init_model), checkpoint_file=os.path.basename(init_model), ) in_state_dict = x["models"][0].state_dict() init_args = x["args"].model num_positional_emb = init_args.max_positions + task.dictionary.pad() + 1 # follow the setup in roberta self.model = TransformerSentenceEncoder( padding_idx=task.dictionary.pad(), vocab_size=len(task.dictionary), num_encoder_layers=getattr( args, "encoder_layers", init_args.encoder_layers ), embedding_dim=init_args.encoder_embed_dim, ffn_embedding_dim=init_args.encoder_ffn_embed_dim, num_attention_heads=init_args.encoder_attention_heads, dropout=init_args.dropout, attention_dropout=init_args.attention_dropout, activation_dropout=init_args.activation_dropout, num_segments=2, # add language embeddings max_seq_len=num_positional_emb, 
offset_positions_by_padding=False, encoder_normalize_before=True, apply_bert_init=True, activation_fn=init_args.activation_fn, freeze_embeddings=args.freeze_embeddings, n_trans_layers_to_freeze=args.n_trans_layers_to_freeze, ) # still need to learn segment embeddings as we added a second language embedding if args.freeze_embeddings: for p in self.model.segment_embeddings.parameters(): p.requires_grad = False update_init_roberta_model_state(in_state_dict) print("loading weights from the pretrained model") self.model.load_state_dict( in_state_dict, strict=False ) # ignore mismatch in language embeddings ffn_embedding_dim = init_args.encoder_ffn_embed_dim num_attention_heads = init_args.encoder_attention_heads dropout = init_args.dropout attention_dropout = init_args.attention_dropout activation_dropout = init_args.activation_dropout activation_fn = init_args.activation_fn classifier_embed_dim = getattr( args, "embed_dim", init_args.encoder_embed_dim ) if classifier_embed_dim != init_args.encoder_embed_dim: self.transform_layer = nn.Linear( init_args.encoder_embed_dim, classifier_embed_dim ) else: self.model = TransformerSentenceEncoder( padding_idx=task.dictionary.pad(), vocab_size=len(task.dictionary), num_encoder_layers=args.encoder_layers, embedding_dim=args.embed_dim, ffn_embedding_dim=args.ffn_embed_dim, num_attention_heads=args.attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, max_seq_len=task.max_positions() if task.max_positions() else args.tokens_per_sample, num_segments=2, offset_positions_by_padding=False, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, ) classifier_embed_dim = args.embed_dim ffn_embedding_dim = args.ffn_embed_dim num_attention_heads = args.attention_heads dropout = args.dropout attention_dropout = args.attention_dropout activation_dropout = args.activation_dropout activation_fn = args.activation_fn self.joint_classification = args.joint_classification if args.joint_classification == "sent": if args.joint_normalize_before: self.joint_layer_norm = LayerNorm(classifier_embed_dim) else: self.joint_layer_norm = None self.joint_layers = nn.ModuleList( [ TransformerSentenceEncoderLayer( embedding_dim=classifier_embed_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, ) for _ in range(args.num_joint_layers) ] ) self.classifier = RobertaClassificationHead( classifier_embed_dim, classifier_embed_dim, 1, # num_classes "tanh", args.classifier_dropout, ) def forward(self, src_tokens, src_lengths): segment_labels = self.get_segment_labels(src_tokens) positions = self.get_positions(src_tokens, segment_labels) inner_states, _ = self.model( tokens=src_tokens, segment_labels=segment_labels, last_state_only=True, positions=positions, ) return inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C def sentence_forward(self, encoder_out, src_tokens=None, sentence_rep="head"): # encoder_out: B x T x C if sentence_rep == "head": x = encoder_out[:, :1, :] else: # 'meanpool', 'maxpool' assert src_tokens is not None, "meanpool requires src_tokens input" segment_labels = self.get_segment_labels(src_tokens) padding_mask = src_tokens.ne(self.padding_idx) encoder_mask = segment_labels * padding_mask.type_as(segment_labels) if sentence_rep == "meanpool": ntokens = torch.sum(encoder_mask, 
dim=1, keepdim=True) x = torch.sum( encoder_out * encoder_mask.unsqueeze(2), dim=1, keepdim=True ) / ntokens.unsqueeze(2).type_as(encoder_out) else: # 'maxpool' encoder_out[ (encoder_mask == 0).unsqueeze(2).repeat(1, 1, encoder_out.shape[-1]) ] = -float("inf") x, _ = torch.max(encoder_out, dim=1, keepdim=True) if hasattr(self, "transform_layer"): x = self.transform_layer(x) return x # B x 1 x C def joint_forward(self, x): # x: T x B x C if self.joint_layer_norm: x = self.joint_layer_norm(x.transpose(0, 1)) x = x.transpose(0, 1) for layer in self.joint_layers: x, _ = layer(x, self_attn_padding_mask=None) return x def classification_forward(self, x): # x: B x T x C return self.classifier(x) @dataclass class DiscriminativeNMTRerankerConfig(FairseqDataclass): pretrained_model: str = field( default="", metadata={"help": "pretrained model to load"} ) sentence_rep: SENTENCE_REP_CHOICES = field( default="head", metadata={ "help": "method to transform the output of the transformer stack to a sentence-level representation" }, ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"} ) classifier_dropout: float = field( default=0.0, metadata={"help": "classifier dropout probability"} ) embed_dim: int = field(default=768, metadata={"help": "embedding dimension"}) ffn_embed_dim: int = field( default=2048, metadata={"help": "embedding dimension for FFN"} ) encoder_layers: int = field(default=12, metadata={"help": "num encoder layers"}) attention_heads: int = field(default=8, metadata={"help": "num attention heads"}) encoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each encoder block"} ) apply_bert_init: bool = field( default=False, metadata={"help": "use custom param initialization for BERT"} ) activation_fn: ACTIVATION_FN_CHOICES = field( default="relu", metadata={"help": "activation function to use"} ) freeze_embeddings: bool = field( default=False, metadata={"help": "freeze embeddings in the pretrained model"} ) n_trans_layers_to_freeze: int = field( default=0, metadata={ "help": "number of layers to freeze in the pretrained transformer model" }, ) # joint classfication joint_classification: JOINT_CLASSIFICATION_CHOICES = field( default="none", metadata={"help": "method to compute joint features for classification"}, ) num_joint_layers: int = field( default=1, metadata={"help": "number of joint layers"} ) joint_normalize_before: bool = field( default=False, metadata={"help": "apply layer norm on the input to the joint layer"}, ) @register_model( "discriminative_nmt_reranker", dataclass=DiscriminativeNMTRerankerConfig ) class DiscriminativeNMTReranker(BaseFairseqModel): @classmethod def build_model(cls, args, task): model = BertRanker(args, task) return DiscriminativeNMTReranker(args, model) def __init__(self, args, model): super().__init__() self.model = model self.sentence_rep = args.sentence_rep self.joint_classification = args.joint_classification def forward(self, src_tokens, src_lengths, **kwargs): return self.model(src_tokens, src_lengths) def sentence_forward(self, encoder_out, src_tokens): return self.model.sentence_forward(encoder_out, src_tokens, self.sentence_rep) def joint_forward(self, x): return self.model.joint_forward(x) def classification_forward(self, x): return 
self.model.classification_forward(x)
EXA-1-master
exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/models/discriminative_reranking_model.py
#!/usr/bin/env python

import argparse
from multiprocessing import Pool
from pathlib import Path

import sacrebleu
import sentencepiece as spm


def read_text_file(filename):
    with open(filename, "r") as f:
        output = [line.strip() for line in f]

    return output


def get_bleu(in_sent, target_sent):
    bleu = sacrebleu.corpus_bleu([in_sent], [[target_sent]])
    out = " ".join(
        map(str, [bleu.score, bleu.sys_len, bleu.ref_len] + bleu.counts + bleu.totals)
    )
    return out


def get_ter(in_sent, target_sent):
    ter = sacrebleu.corpus_ter([in_sent], [[target_sent]])
    out = " ".join(map(str, [ter.score, ter.num_edits, ter.ref_length]))
    return out


def init(sp_model):
    global sp
    sp = spm.SentencePieceProcessor()
    sp.Load(sp_model)


def process(source_sent, target_sent, hypo_sent, metric):
    source_bpe = " ".join(sp.EncodeAsPieces(source_sent))
    hypo_bpe = [" ".join(sp.EncodeAsPieces(h)) for h in hypo_sent]

    if metric == "bleu":
        score_str = [get_bleu(h, target_sent) for h in hypo_sent]
    else:  # ter
        score_str = [get_ter(h, target_sent) for h in hypo_sent]

    return source_bpe, hypo_bpe, score_str


def main(args):
    assert (
        args.split.startswith("train") or args.num_shards == 1
    ), "--num-shards should be set to 1 for valid and test sets"
    assert (
        args.split.startswith("train")
        or args.split.startswith("valid")
        or args.split.startswith("test")
    ), "--split should be set to train[n]/valid[n]/test[n]"

    source_sents = read_text_file(args.input_source)
    target_sents = read_text_file(args.input_target)

    num_sents = len(source_sents)
    assert num_sents == len(
        target_sents
    ), f"{args.input_source} and {args.input_target} should have the same number of sentences."

    hypo_sents = read_text_file(args.input_hypo)
    assert (
        len(hypo_sents) % args.beam == 0
    ), f"Number of hypotheses ({len(hypo_sents)}) cannot be divided by beam size ({args.beam})."

    hypo_sents = [
        hypo_sents[i : i + args.beam] for i in range(0, len(hypo_sents), args.beam)
    ]
    assert num_sents == len(
        hypo_sents
    ), f"{args.input_hypo} should contain {num_sents * args.beam} hypotheses but only has {len(hypo_sents) * args.beam}. (--beam={args.beam})"

    output_dir = args.output_dir / args.metric
    for ns in range(args.num_shards):
        print(f"processing shard {ns+1}/{args.num_shards}")
        shard_output_dir = output_dir / f"split{ns+1}"
        source_output_dir = shard_output_dir / "input_src"
        hypo_output_dir = shard_output_dir / "input_tgt"
        metric_output_dir = shard_output_dir / args.metric

        source_output_dir.mkdir(parents=True, exist_ok=True)
        hypo_output_dir.mkdir(parents=True, exist_ok=True)
        metric_output_dir.mkdir(parents=True, exist_ok=True)

        if args.n_proc > 1:
            with Pool(
                args.n_proc, initializer=init, initargs=(args.sentencepiece_model,)
            ) as p:
                output = p.starmap(
                    process,
                    [
                        (source_sents[i], target_sents[i], hypo_sents[i], args.metric)
                        for i in range(ns, num_sents, args.num_shards)
                    ],
                )
        else:
            init(args.sentencepiece_model)
            output = [
                process(source_sents[i], target_sents[i], hypo_sents[i], args.metric)
                for i in range(ns, num_sents, args.num_shards)
            ]

        with open(source_output_dir / f"{args.split}.bpe", "w") as s_o, open(
            hypo_output_dir / f"{args.split}.bpe", "w"
        ) as h_o, open(metric_output_dir / f"{args.split}.{args.metric}", "w") as m_o:
            for source_bpe, hypo_bpe, score_str in output:
                assert len(hypo_bpe) == len(score_str)
                for h, m in zip(hypo_bpe, score_str):
                    s_o.write(f"{source_bpe}\n")
                    h_o.write(f"{h}\n")
                    m_o.write(f"{m}\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-source", type=Path, required=True)
    parser.add_argument("--input-target", type=Path, required=True)
    parser.add_argument("--input-hypo", type=Path, required=True)
    parser.add_argument("--output-dir", type=Path, required=True)
    parser.add_argument("--split", type=str, required=True)
    parser.add_argument("--beam", type=int, required=True)
    parser.add_argument("--sentencepiece-model", type=str, required=True)
    parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu")
    parser.add_argument("--num-shards", type=int, default=1)
    parser.add_argument("--n-proc", type=int, default=8)

    args = parser.parse_args()

    main(args)
EXA-1-master
exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/scripts/prep_data.py
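Illustrative sketch (not part of the repository): get_bleu() in prep_data.py packs the sentence-level sacrebleu statistics into one space-separated line per hypothesis, which is what the reranker later reads as its target score. The snippet below reproduces that line format for a single hypothesis/reference pair; the sentences are made up for the example.

import sacrebleu

hypothesis = "the cat sat on the mat"  # toy strings, not from the dataset
reference = "the cat is on the mat"

bleu = sacrebleu.corpus_bleu([hypothesis], [[reference]])
# score, sys_len, ref_len followed by the 1-4 gram match counts and totals,
# mirroring what get_bleu() writes to the <split>.bleu file
line = " ".join(
    map(str, [bleu.score, bleu.sys_len, bleu.ref_len] + bleu.counts + bleu.totals)
)
print(line)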
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from dataclasses import dataclass, field

import torch
import torch.nn.functional as F

from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import ChoiceEnum, FairseqDataclass


_EPSILON = torch.finfo(torch.float32).eps
TARGET_DIST_NORM_CHOICES = ChoiceEnum(["none", "minmax"])


@dataclass
class KLDivergenceRerankingCriterionConfig(FairseqDataclass):
    target_dist_norm: TARGET_DIST_NORM_CHOICES = field(
        default="none",
        metadata={"help": "method to normalize the range of target scores"},
    )
    temperature: float = field(
        default=1.0,
        metadata={"help": "temperature in softmax for target distributions"},
    )
    forward_batch_size: int = field(
        default=32,
        metadata={
            "help": "number of hypotheses per batch for model forward (set a value smaller than --mt-beam to avoid OOM when training with a large beam size)"
        },
    )


@register_criterion(
    "kl_divergence_rereanking", dataclass=KLDivergenceRerankingCriterionConfig
)
class KLDivergenceRerankingCriterion(FairseqCriterion):
    def __init__(
        self,
        task,
        target_dist_norm,
        temperature,
        forward_batch_size,
    ):
        super().__init__(task)
        self.target_dist_norm = target_dist_norm
        self.temperature = temperature
        self.forward_batch_size = forward_batch_size

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        sample_size = sample["id"].numel()
        assert sample_size % self.task.cfg.mt_beam == 0, (
            f"sample_size ({sample_size}) cannot be divided by beam size ({self.task.cfg.mt_beam}). "
            f"Please set --required-batch-size-multiple={self.task.cfg.mt_beam}."
        )

        # split into smaller batches for model forward
        batch_out = []
        for i in range(0, sample_size, self.forward_batch_size):
            j = min(i + self.forward_batch_size, sample_size)

            out = model(
                src_tokens=sample["net_input"]["src_tokens"][i:j, :],
                src_lengths=sample["net_input"]["src_lengths"][i:j],
            )

            batch_out.append(
                model.sentence_forward(out, sample["net_input"]["src_tokens"][i:j, :])
            )

        batch_out = torch.cat(batch_out, dim=0).view(
            self.task.cfg.mt_beam, sample_size // self.task.cfg.mt_beam, -1
        )  # T x B x C
        if model.joint_classification == "sent":
            batch_out = model.joint_forward(batch_out)
        scores = model.classification_forward(batch_out.view(sample_size, 1, -1)).view(
            -1, self.task.cfg.mt_beam
        )  # input: B x T x C

        loss = self.compute_kl_loss(
            scores, sample["target"][:, 0].view(-1, self.task.cfg.mt_beam)
        )

        sample_size = sample_size // self.task.cfg.mt_beam

        logging_output = {
            "loss": loss.detach(),
            "ntokens": sample["ntokens"],
            "nsentences": sample_size * self.task.cfg.mt_beam,
            "sample_size": sample_size,
            "scores": scores.detach(),
        }

        return loss, sample_size, logging_output

    def compute_kl_loss(self, logits, target):
        norm_target = target
        if self.target_dist_norm == "minmax":
            min_v = torch.min(target, 1, keepdim=True).values
            max_v = torch.max(target, 1, keepdim=True).values
            norm_target = (target - min_v) / (max_v - min_v + _EPSILON)

        target_dist = F.softmax(
            norm_target / self.temperature, dim=-1, dtype=torch.float32
        )
        model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32)
        loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum()
        return loss

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))

        sample_size = utils.item(
            sum(log.get("sample_size", 0) for log in logging_outputs)
        )

        loss = loss_sum / sample_size / math.log(2)
        metrics.log_scalar("loss", loss, sample_size, round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
EXA-1-master
exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py
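Illustrative sketch (not part of the repository): compute_kl_loss() above measures the KL divergence between a softmax over the (optionally min-max normalized) target metric scores and the model's log-softmax scores across the mt_beam hypotheses of each source sentence. A minimal standalone reproduction, with made-up scores for two sentences and a beam of three:

import torch
import torch.nn.functional as F

_EPSILON = torch.finfo(torch.float32).eps


def kl_reranking_loss(logits, target, temperature=1.0, minmax=True):
    # logits: reranker scores, target: metric scores (e.g. sentence BLEU); both B x beam
    if minmax:
        min_v = target.min(dim=1, keepdim=True).values
        max_v = target.max(dim=1, keepdim=True).values
        target = (target - min_v) / (max_v - min_v + _EPSILON)
    target_dist = F.softmax(target / temperature, dim=-1, dtype=torch.float32)
    model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    # sum over sentences of KL(target_dist || softmax(logits))
    return -(target_dist * model_dist - target_dist * target_dist.log()).sum()


# toy example: 2 source sentences, 3 hypotheses each (hypothetical numbers)
logits = torch.tensor([[0.2, 1.5, -0.3], [0.0, 0.1, 2.0]])
bleu = torch.tensor([[12.0, 35.0, 20.0], [40.0, 41.0, 10.0]])
print(kl_reranking_loss(logits, bleu))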
from .discriminative_reranking_criterion import KLDivergenceRerankingCriterion


__all__ = [
    "KLDivergenceRerankingCriterion",
]
EXA-1-master
exa/models/unilm-master/edgelm/examples/discriminative_reranking_nmt/criterions/__init__.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.sequence_generator import EnsembleModel from fairseq.utils import safe_hasattr def get_avg_pool( models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False ): model = EnsembleModel(models) # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32) encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype( np.float32 ) encoder_mask = np.expand_dims(encoder_mask.T, axis=2) if has_langtok: encoder_mask = encoder_mask[1:, :, :] np_encoder_outs = np_encoder_outs[1, :, :] masked_encoder_outs = encoder_mask * np_encoder_outs avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0) return avg_pool def main(args): assert args.path is not None, "--path required for generation!" assert ( not args.sampling or args.nbest == args.beam ), "--sampling requires --nbest to be equal to --beam" assert ( args.replace_unk is None or args.raw_text ), "--replace-unk requires a raw text dataset (--raw-text)" args.beam = 1 utils.import_user_module(args) if args.max_tokens is None: args.max_tokens = 12000 print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary # Load ensemble print("| loading model(s) from {}".format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( args.path.split(":"), arg_overrides=eval(args.model_overrides), task=task, ) # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_positions=utils.resolve_max_positions( task.max_positions(), ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, ).next_epoch_itr(shuffle=False) num_sentences = 0 source_sentences = [] shard_id = 0 all_avg_pool = None encoder_has_langtok = ( safe_hasattr(task.args, "encoder_langtok") and task.args.encoder_langtok is not None and safe_hasattr(task.args, "lang_tok_replacing_bos_eos") and not task.args.lang_tok_replacing_bos_eos ) with progress_bar.build_progress_bar(args, itr) as t: for sample in t: if sample is None: print("Skipping None") 
continue sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample["target"][:, : args.prefix_size] with torch.no_grad(): avg_pool = get_avg_pool( models, sample, prefix_tokens, src_dict, args.post_process, has_langtok=encoder_has_langtok, ) if all_avg_pool is not None: all_avg_pool = np.concatenate((all_avg_pool, avg_pool)) else: all_avg_pool = avg_pool if not isinstance(sample["id"], list): sample_ids = sample["id"].tolist() else: sample_ids = sample["id"] for i, sample_id in enumerate(sample_ids): # Remove padding src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) # Either retrieve the original sentences or regenerate them from tokens. if align_dict is not None: src_str = task.dataset(args.gen_subset).src.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, args.post_process) else: src_str = "" if not args.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str)) source_sentences.append(f"{sample_id}\t{src_str}") num_sentences += sample["nsentences"] if all_avg_pool.shape[0] >= 1000000: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w", ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w", ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) all_avg_pool = None source_sentences = [] shard_id += 1 if all_avg_pool is not None: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w" ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w" ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) return None def cli_main(): parser = options.get_generation_parser() parser.add_argument( "--encoder-save-dir", default="", type=str, metavar="N", help="directory to save encoder outputs", ) args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/examples/criss/save_encoder.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import glob from subprocess import check_call try: import faiss has_faiss = True except ImportError: has_faiss = False import numpy as np GB = 1024 * 1024 * 1024 def call(cmd): print(cmd) check_call(cmd, shell=True) def get_batches(directory, lang, prefix="all_avg_pool"): print(f"Finding in {directory}/{prefix}.{lang}*") files = glob.glob(f"{directory}/{prefix}.{lang}*") emb_files = [] txt_files = [] for emb_fi in files: emb_files.append(emb_fi) txt_fi = emb_fi.replace(prefix, "sentences") txt_files.append(txt_fi) return emb_files, txt_files def load_batch(emb_file, dim): embeddings = np.fromfile(emb_file, dtype=np.float32) num_rows = int(embeddings.shape[0] / dim) embeddings = embeddings.reshape((num_rows, dim)) faiss.normalize_L2(embeddings) return embeddings def knnGPU_sharded(x_batches_f, y_batches_f, dim, k, direction="x2y"): if not has_faiss: raise ImportError("Please install Faiss") sims = [] inds = [] xfrom = 0 xto = 0 for x_batch_f in x_batches_f: yfrom = 0 yto = 0 x_batch = load_batch(x_batch_f, dim) xto = xfrom + x_batch.shape[0] bsims, binds = [], [] for y_batch_f in y_batches_f: y_batch = load_batch(y_batch_f, dim) neighbor_size = min(k, y_batch.shape[0]) yto = yfrom + y_batch.shape[0] print("{}-{} -> {}-{}".format(xfrom, xto, yfrom, yto)) idx = faiss.IndexFlatIP(dim) idx = faiss.index_cpu_to_all_gpus(idx) idx.add(y_batch) bsim, bind = idx.search(x_batch, neighbor_size) bsims.append(bsim) binds.append(bind + yfrom) yfrom += y_batch.shape[0] del idx del y_batch bsims = np.concatenate(bsims, axis=1) binds = np.concatenate(binds, axis=1) aux = np.argsort(-bsims, axis=1) sim_batch = np.zeros((x_batch.shape[0], k), dtype=np.float32) ind_batch = np.zeros((x_batch.shape[0], k), dtype=np.int64) for i in range(x_batch.shape[0]): for j in range(k): sim_batch[i, j] = bsims[i, aux[i, j]] ind_batch[i, j] = binds[i, aux[i, j]] sims.append(sim_batch) inds.append(ind_batch) xfrom += x_batch.shape[0] del x_batch sim = np.concatenate(sims, axis=0) ind = np.concatenate(inds, axis=0) return sim, ind def score(sim, fwd_mean, bwd_mean, margin): return margin(sim, (fwd_mean + bwd_mean) / 2) def score_candidates( sim_mat, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False ): print(" - scoring {:d} candidates".format(sim_mat.shape[0])) scores = np.zeros(candidate_inds.shape) for i in range(scores.shape[0]): for j in range(scores.shape[1]): k = int(candidate_inds[i, j]) scores[i, j] = score(sim_mat[i, j], fwd_mean[i], bwd_mean[k], margin) return scores def load_text(files): all_sentences = [] for fi in files: with open(fi) as sentence_fi: for line in sentence_fi: all_sentences.append(line.strip()) print(f"Read {len(all_sentences)} sentences") return all_sentences if __name__ == "__main__": parser = argparse.ArgumentParser(description="Mine bitext") parser.add_argument("--src-lang", help="Source language") parser.add_argument("--tgt-lang", help="Target language") parser.add_argument( "--dict-path", help="Path to dictionary file", default="dict.txt" ) parser.add_argument( "--spm-path", help="Path to SPM model file", default="sentence.bpe.model" ) parser.add_argument("--dim", type=int, default=1024, help="Embedding dimension") parser.add_argument("--mem", type=int, default=5, help="Memory in GB") parser.add_argument("--src-dir", help="Source directory") 
parser.add_argument("--tgt-dir", help="Target directory") parser.add_argument("--output", help="Output path") parser.add_argument( "--neighborhood", type=int, default=4, help="Embedding dimension" ) parser.add_argument( "--threshold", type=float, default=1.06, help="Threshold on mined bitext" ) parser.add_argument( "--valid-size", type=int, default=2000, help="Number of sentences used for validation set", ) parser.add_argument( "--min-count", type=int, default=50000, help="Min num sentences used for each language", ) args = parser.parse_args() x_batches_f, x_sents_f = get_batches(args.src_dir, args.src_lang) y_batches_f, y_sents_f = get_batches(args.tgt_dir, args.tgt_lang) margin = lambda a, b: a / b y2x_sim, y2x_ind = knnGPU_sharded( y_batches_f, x_batches_f, args.dim, args.neighborhood, direction="y2x" ) x2y_sim, x2y_ind = knnGPU_sharded( x_batches_f, y_batches_f, args.dim, args.neighborhood, direction="x2y" ) x2y_mean = x2y_sim.mean(axis=1) y2x_mean = y2x_sim.mean(axis=1) fwd_scores = score_candidates(x2y_sim, x2y_ind, x2y_mean, y2x_mean, margin) bwd_scores = score_candidates(y2x_sim, y2x_ind, y2x_mean, x2y_mean, margin) fwd_best = x2y_ind[np.arange(x2y_sim.shape[0]), fwd_scores.argmax(axis=1)] bwd_best = y2x_ind[np.arange(y2x_sim.shape[0]), bwd_scores.argmax(axis=1)] indices = np.stack( ( np.concatenate((np.arange(x2y_ind.shape[0]), bwd_best)), np.concatenate((fwd_best, np.arange(y2x_ind.shape[0]))), ), axis=1, ) scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1))) x_sentences = load_text(x_sents_f) y_sentences = load_text(y_sents_f) threshold = args.threshold min_count = args.min_count seen_src, seen_trg = set(), set() directory = args.output call(f"mkdir -p {directory}") src_out = open( f"{directory}/all.{args.src_lang}", mode="w", encoding="utf-8", errors="surrogateescape", ) tgt_out = open( f"{directory}/all.{args.tgt_lang}", mode="w", encoding="utf-8", errors="surrogateescape", ) scores_out = open( f"{directory}/all.scores", mode="w", encoding="utf-8", errors="surrogateescape" ) count = 0 for i in np.argsort(-scores): src_ind, trg_ind = indices[i] if src_ind not in seen_src and trg_ind not in seen_trg: seen_src.add(src_ind) seen_trg.add(trg_ind) if scores[i] > threshold or count < min_count: if x_sentences[src_ind]: print(scores[i], file=scores_out) print(x_sentences[src_ind], file=src_out) print(y_sentences[trg_ind], file=tgt_out) count += 1 else: print(f"Ignoring sentence: {x_sentences[src_ind]}") src_out.close() tgt_out.close() scores_out.close() print(f"Found {count} pairs for threshold={threshold}") with open(f"{directory}/all.{args.src_lang}") as all_s, open( f"{directory}/all.{args.tgt_lang}" ) as all_t, open(f"{directory}/valid.{args.src_lang}", "w") as valid_s, open( f"{directory}/valid.{args.tgt_lang}", "w" ) as valid_t, open( f"{directory}/train.{args.src_lang}", "w" ) as train_s, open( f"{directory}/train.{args.tgt_lang}", "w" ) as train_t: count = 0 for s_line, t_line in zip(all_s, all_t): s_line = s_line.split("\t")[1] t_line = t_line.split("\t")[1] if count >= args.valid_size: train_s.write(s_line) train_t.write(t_line) else: valid_s.write(s_line) valid_t.write(t_line) count += 1
EXA-1-master
exa/models/unilm-master/edgelm/examples/criss/mining/mine.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import glob

import numpy as np


DIM = 1024


def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
    target_ids = [tid for tid in target_embs]
    source_mat = np.stack(source_embs.values(), axis=0)
    normalized_source_mat = source_mat / np.linalg.norm(
        source_mat, axis=1, keepdims=True
    )
    target_mat = np.stack(target_embs.values(), axis=0)
    normalized_target_mat = target_mat / np.linalg.norm(
        target_mat, axis=1, keepdims=True
    )
    sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
    if return_sim_mat:
        return sim_mat
    neighbors_map = {}
    for i, sentence_id in enumerate(source_embs):
        idx = np.argsort(sim_mat[i, :])[::-1][:k]
        neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
    return neighbors_map


def load_embeddings(directory, LANGS):
    sentence_embeddings = {}
    sentence_texts = {}
    for lang in LANGS:
        sentence_embeddings[lang] = {}
        sentence_texts[lang] = {}
        lang_dir = f"{directory}/{lang}"
        embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
        for embed_file in embedding_files:
            shard_id = embed_file.split(".")[-1]
            embeddings = np.fromfile(embed_file, dtype=np.float32)
            num_rows = embeddings.shape[0] // DIM
            embeddings = embeddings.reshape((num_rows, DIM))

            with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
                for idx, line in enumerate(sentence_file):
                    sentence_id, sentence = line.strip().split("\t")
                    sentence_texts[lang][sentence_id] = sentence
                    sentence_embeddings[lang][sentence_id] = embeddings[idx, :]

    return sentence_embeddings, sentence_texts


def compute_accuracy(directory, LANGS):
    sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)

    top_1_accuracy = {}

    top1_str = " ".join(LANGS) + "\n"
    for source_lang in LANGS:
        top_1_accuracy[source_lang] = {}
        top1_str += f"{source_lang} "
        for target_lang in LANGS:
            top1 = 0
            top5 = 0
            neighbors_map = compute_dist(
                sentence_embeddings[source_lang], sentence_embeddings[target_lang]
            )
            for sentence_id, neighbors in neighbors_map.items():
                if sentence_id == neighbors[0]:
                    top1 += 1
                if sentence_id in neighbors[:5]:
                    top5 += 1
            n = len(sentence_embeddings[target_lang])
            top1_str += f"{top1/n} "
        top1_str += "\n"

    print(top1_str)
    print(top1_str, file=open(f"{directory}/accuracy", "w"))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Analyze encoder outputs")
    parser.add_argument("directory", help="Source language corpus")
    parser.add_argument("--langs", help="List of langs")
    args = parser.parse_args()
    langs = args.langs.split(",")
    compute_accuracy(args.directory, langs)
EXA-1-master
exa/models/unilm-master/edgelm/examples/criss/sentence_retrieval/encoder_analysis.py
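Illustrative sketch (not part of the repository): compute_dist() above does cosine-similarity retrieval between the two per-language embedding dictionaries, and compute_accuracy() counts a top-1 hit when a sentence's nearest neighbor in the other language shares its sentence id. The toy vectors below are hypothetical and only show the retrieval step in miniature.

import numpy as np

# toy 3-dimensional "encoder outputs" keyed by sentence id (made-up data)
src = {"0": np.array([1.0, 0.0, 0.0]), "1": np.array([0.0, 1.0, 0.0])}
tgt = {"0": np.array([0.9, 0.1, 0.0]), "1": np.array([0.1, 0.9, 0.0])}


def nearest(source_embs, target_embs):
    target_ids = list(target_embs)
    s = np.stack(list(source_embs.values()))
    t = np.stack(list(target_embs.values()))
    s = s / np.linalg.norm(s, axis=1, keepdims=True)
    t = t / np.linalg.norm(t, axis=1, keepdims=True)
    sim = s.dot(t.T)  # cosine similarity matrix, as in compute_dist()
    return {sid: target_ids[sim[i].argmax()] for i, sid in enumerate(source_embs)}


neighbors = nearest(src, tgt)
top1 = sum(sid == nid for sid, nid in neighbors.items()) / len(src)
print(neighbors, top1)  # {'0': '0', '1': '1'} 1.0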
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from fairseq.search import Search


class NoisyChannelBeamSearch(Search):

    def __init__(self, tgt_dict):
        super().__init__(tgt_dict)
        self.fw_scores_buf = None
        self.lm_scores_buf = None

    def _init_buffers(self, t):
        # super()._init_buffers(t)
        if self.fw_scores_buf is None:
            self.scores_buf = t.new()
            self.indices_buf = torch.LongTensor().to(device=t.device)
            self.beams_buf = torch.LongTensor().to(device=t.device)
            self.fw_scores_buf = t.new()
            self.lm_scores_buf = t.new()

    def combine_fw_bw(self, combine_method, fw_cum, bw, step):
        if combine_method == "noisy_channel":
            fw_norm = fw_cum.div(step + 1)
            lprobs = bw + fw_norm
        elif combine_method == "lm_only":
            lprobs = bw + fw_cum

        return lprobs

    def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method):
        self._init_buffers(fw_lprobs)
        bsz, beam_size, vocab_size = fw_lprobs.size()

        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous()
            bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous()
            # nothing to add since we are at the first step
            fw_lprobs_cum = fw_lprobs
        else:
            # make probs contain cumulative scores for each hypothesis
            raw_scores = scores[:, :, step - 1].unsqueeze(-1)
            fw_lprobs_cum = fw_lprobs.add(raw_scores)

        combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step)

        # choose the top k according to the combined noisy channel model score
        torch.topk(
            combined_lprobs.view(bsz, -1),
            k=min(
                # Take the best 2 x beam_size predictions. We'll choose the first
                # beam_size of these which don't predict eos to continue with.
                beam_size * 2,
                combined_lprobs.view(bsz, -1).size(1) - 1,  # -1 so we never select pad
            ),
            out=(self.scores_buf, self.indices_buf),
        )
        # save corresponding fw and lm scores
        self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf)
        self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf)
        # Project back into relative indices and beams
        self.beams_buf = self.indices_buf // vocab_size
        self.indices_buf.fmod_(vocab_size)
        return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
EXA-1-master
exa/models/unilm-master/edgelm/examples/fast_noisy_channel/noisy_channel_beam_search.py
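Illustrative sketch (not part of the repository): for combine_method == "noisy_channel", combine_fw_bw() length-normalizes the cumulative direct-model score by the number of generated tokens and adds the channel/LM term, and step() then keeps the 2 * beam_size best entries of the combined score and maps the flat indices back to (beam, token) pairs. The toy tensors below are hypothetical and only mirror that selection step.

import torch

step = 2  # current decoding step (0-based)
beam_size = 2
vocab_size = 6
# made-up cumulative direct-model scores and channel+LM scores: bsz x beam x vocab
fw_cum = torch.randn(1, beam_size, vocab_size)
bw = torch.randn(1, beam_size, vocab_size)

combined = bw + fw_cum.div(step + 1)  # the "noisy_channel" combination
scores, indices = torch.topk(combined.view(1, -1), k=2 * beam_size)
beams = indices // vocab_size        # which beam each candidate came from
tokens = indices.fmod(vocab_size)    # which vocabulary entry it extends with
print(scores, beams, tokens)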
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import noisy_channel_translation  # noqa
from . import noisy_channel_sequence_generator  # noqa
from . import noisy_channel_beam_search  # noqa
EXA-1-master
exa/models/unilm-master/edgelm/examples/fast_noisy_channel/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import math import numpy as np import torch import torch.nn.functional as F from torch import Tensor from .noisy_channel_beam_search import NoisyChannelBeamSearch from fairseq.sequence_generator import EnsembleModel class NoisyChannelSequenceGenerator(object): def __init__( self, combine_method, tgt_dict, src_dict=None, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, len_penalty=1.0, unk_penalty=0.0, retain_dropout=False, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, normalize_scores=True, channel_models=None, k2=10, ch_weight=1.0, channel_scoring_type='log_norm', top_k_vocab=0, lm_models=None, lm_dict=None, lm_weight=1.0, normalize_lm_scores_by_tgt_len=False, ): """Generates translations of a given source sentence, using beam search with noisy channel decoding. Args: combine_method (string, optional): Method to combine direct, LM and channel model scores (default: None) tgt_dict (~fairseq.data.Dictionary): target dictionary src_dict (~fairseq.data.Dictionary): source dictionary beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) retain_dropout (bool, optional): use dropout when generating (default: False) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) no_repeat_ngram_size (int, optional): Size of n-grams that we avoid repeating in the generation (default: 0) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) channel_models (List[~fairseq.models.FairseqModel]): ensemble of models translating from the target to the source k2 (int, optional): Top K2 candidates to score per beam at each step (default:10) ch_weight (int, optional): Weight associated with the channel model score assuming that the direct model score has weight 1.0 (default: 1.0) channel_scoring_type (str, optional): String specifying how to score the channel model (default: 'log_norm') top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or `'src_vocab_batched'`, then this parameter specifies the number of most frequent tokens to include in the channel model output vocabulary, in addition to the source tokens in the input batch (default: 0) lm_models (List[~fairseq.models.FairseqModel]): ensemble of models generating text in the target language lm_dict (~fairseq.data.Dictionary): LM Model dictionary lm_weight (int, optional): Weight associated with the LM model score assuming that the direct model score has weight 1.0 (default: 1.0) normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores by the target length? 
By default, we normalize the combination of LM and channel model scores by the source length """ self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.retain_dropout = retain_dropout self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size self.channel_models = channel_models self.src_dict = src_dict self.tgt_dict = tgt_dict self.combine_method = combine_method self.k2 = k2 self.ch_weight = ch_weight self.channel_scoring_type = channel_scoring_type self.top_k_vocab = top_k_vocab self.lm_models = lm_models self.lm_dict = lm_dict self.lm_weight = lm_weight self.log_softmax_fn = torch.nn.LogSoftmax(dim=1) self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len self.share_tgt_dict = (self.lm_dict == self.tgt_dict) self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict) self.ch_scoring_bsz = 3072 assert temperature > 0, '--temperature must be greater than 0' self.search = NoisyChannelBeamSearch(tgt_dict) @torch.no_grad() def generate( self, models, sample, prefix_tokens=None, bos_token=None, **kwargs ): """Generate a batch of translations. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens """ model = EnsembleModel(models) incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(model.models_size) ], ) if not self.retain_dropout: model.eval() # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample['net_input'].items() if k != 'prev_output_tokens' } src_tokens = encoder_input['src_tokens'] src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) input_size = src_tokens.size() # batch dimension goes first followed by source lengths bsz = input_size[0] src_len = input_size[1] beam_size = self.beam_size if self.match_source_len: max_len = src_lengths_no_eos.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), # exclude the EOS marker model.max_decoder_positions() - 1, ) # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = model.reorder_encoder_out(encoder_outs, new_order) src_lengths = encoder_input['src_lengths'] # initialize buffers scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0) lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0) scores_buf = scores.clone() tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad) tokens_buf = tokens.clone() tokens[:, 0] = self.eos if bos_token is None else bos_token # reorder source tokens so they may be used as a reference in generating P(S|T) src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index) src_tokens = src_tokens.repeat(1, 
beam_size).view(-1, src_len) src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1) attn, attn_buf = None, None nonpad_idxs = None # The cands_to_ignore indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then the cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask # list of completed sentences finalized = [[] for i in range(bsz)] finished = [False for i in range(bsz)] num_remaining_sent = bsz # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) cand_offsets = torch.arange(0, cand_size).type_as(tokens) # helper function for allocating buffers on the fly buffers = {} def buffer(name, type_of=tokens): # noqa if name not in buffers: buffers[name] = type_of.new() return buffers[name] def is_finished(sent, step, unfin_idx): """ Check whether we've finished generation for a given sentence, by comparing the worst score among finalized hypotheses to the best possible score among unfinalized hypotheses. """ assert len(finalized[sent]) <= beam_size if len(finalized[sent]) == beam_size: return True return False def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores): """ Finalize the given hypotheses at this step, while keeping the total number of finalized hypotheses per sentence <= beam_size. Note: the input must be in the desired finalization order, so that hypotheses that appear earlier in the input are preferred to those that appear later. 
Args: step: current time step bbsz_idx: A vector of indices in the range [0, bsz*beam_size), indicating which hypotheses to finalize eos_scores: A vector of the same size as bbsz_idx containing fw scores for each hypothesis combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing combined noisy channel scores for each hypothesis """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors tokens_clone = tokens.index_select(0, bbsz_idx) tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS assert not tokens_clone.eq(self.eos).any() tokens_clone[:, step] = self.eos attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty cum_unfin = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) sents_seen = set() for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())): unfin_idx = idx // beam_size sent = unfin_idx + cum_unfin[unfin_idx] sents_seen.add((sent, unfin_idx)) if self.match_source_len and step > src_lengths_no_eos[unfin_idx]: score = -math.inf def get_hypo(): if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i][nonpad_idxs[sent]] _, alignment = hypo_attn.max(dim=0) else: hypo_attn = None alignment = None return { 'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, # src_len x tgt_len 'alignment': alignment, 'positional_scores': pos_scores[i], } if len(finalized[sent]) < beam_size: finalized[sent].append(get_hypo()) newly_finished = [] for sent, unfin_idx in sents_seen: # check termination conditions for this sentence if not finished[sent] and is_finished(sent, step, unfin_idx): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k): """Rescore the top k hypothesis from each beam using noisy channel modeling Returns: new_fw_lprobs: the direct model probabilities after pruning the top k new_ch_lm_lprobs: the combined channel and language model probabilities new_lm_lprobs: the language model probabilities after pruning the top k """ with torch.no_grad(): lprobs_size = lprobs.size() if prefix_tokens is not None and step < prefix_tokens.size(1): probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :] cand_scores = torch.gather( probs_slice, dim=1, index=prefix_tokens[:, step].view(-1, 1).data ).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1) cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1) # need to calculate and save fw and lm probs for prefix tokens fw_top_k = cand_scores fw_top_k_idx = cand_indices k = 1 else: # take the top k best words for every sentence in batch*beam fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k) eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0] ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0) src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype) if self.combine_method != "lm_only": 
temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1) not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index cur_tgt_size = step+2 # add eos to all candidate sentences except those that already end in eos eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1) eos_tokens[eos_idx] = self.tgt_dict.pad_index if step == 0: channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1) else: # move eos from beginning to end of target sentence channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1) ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size)) ch_input_lengths[eos_idx] = cur_tgt_size-1 if self.channel_scoring_type == "unnormalized": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:]) ch_intermed_scores = ch_intermed_scores.float() ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) elif self.channel_scoring_type == "k2_separate": for k_idx in range(k): k_eos_tokens = eos_tokens[k_idx::k, :] if step == 0: k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) else: # move eos from beginning to end of target sentence k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) k_ch_input_lengths = ch_input_lengths[k_idx::k] k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens) k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True) k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2) k_ch_intermed_scores *= not_padding.float() ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1) elif self.channel_scoring_type == "src_vocab": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_lprobs = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab) ch_scores = torch.sum(ch_lprobs, dim=1) elif self.channel_scoring_type == "src_vocab_batched": ch_bsz_size = temp_src_tokens_full.shape[0] ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz)) for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)): end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size) temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :] channel_input_batch = channel_input[start_idx:end_idx, :] ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx] ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch) ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, encoder_out=ch_encoder_output_batch, features_only=True) ch_lprobs_list[i] = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output_batch, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab, start_idx=start_idx, end_idx=end_idx) ch_lprobs = torch.cat(ch_lprobs_list, dim=0) ch_scores = 
torch.sum(ch_lprobs, dim=1) else: ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full) ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True) ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1) ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) else: cur_tgt_size = 0 ch_scores = ch_scores.view(bsz*beam_size, k) expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten() if self.share_tgt_dict: lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k) else: new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm) new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm) lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k) lm_scores.add_(expanded_lm_prefix_scores) ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size) # initialize all as min value new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_fw_lprobs[:, self.pad] = -math.inf new_ch_lm_lprobs[:, self.pad] = -math.inf new_lm_lprobs[:, self.pad] = -math.inf new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k) new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores) new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k)) return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size): if self.channel_scoring_type == "unnormalized": ch_scores = self.log_softmax_fn( ch_scores.view(-1, self.beam_size * self.k2) ).view(ch_scores.shape) ch_scores = ch_scores * self.ch_weight lm_scores1 = lm_scores1 * self.lm_weight if combine_type == "lm_only": # log P(T|S) + log P(T) ch_scores = lm_scores1.view(ch_scores.size()) elif combine_type == "noisy_channel": # 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T) if self.normalize_lm_scores_by_tgt_len: ch_scores.div_(src_size) lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size) ch_scores.add_(lm_scores_norm) # 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T) else: ch_scores.add_(lm_scores1.view(ch_scores.size())) ch_scores.div_(src_size) return ch_scores if self.channel_models is not None: channel_model = self.channel_models[0] # assume only one channel_model model else: channel_model = None lm = EnsembleModel(self.lm_models) lm_incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(lm.models_size) ], ) reorder_state = None batch_idxs = None for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs) reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size) model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state) 
lm.reorder_incremental_state(lm_incremental_states, reorder_state) fw_lprobs, avg_attn_scores = model.forward_decoder( tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature, ) fw_lprobs[:, self.pad] = -math.inf # never select pad fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2) # handle min and max length constraints if step >= max_len: fw_lprobs[:, :self.eos] = -math.inf fw_lprobs[:, self.eos + 1:] = -math.inf elif step < self.min_len: fw_lprobs[:, self.eos] = -math.inf # handle prefix tokens (possibly with different lengths) if prefix_tokens is not None and step < prefix_tokens.size(1): prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_mask = prefix_toks.ne(self.pad) prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) fw_lprobs[prefix_mask] = -math.inf fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs ) prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) ch_lm_lprobs[prefix_mask] = -math.inf ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs ) prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) lm_lprobs[prefix_mask] = -math.inf lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() def replicate_first_beam(tensor, mask): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) # copy tokens, scores and lprobs from the first beam to all beams tokens = replicate_first_beam(tokens, eos_mask_batch_dim) scores = replicate_first_beam(scores, eos_mask_batch_dim) fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim) ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim) lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim) if self.no_repeat_ngram_size > 0: # for each beam and batch sentence, generate a list of previous ngrams gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): gen_tokens = tokens[bbsz_idx].tolist() for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]): gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \ gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]] # Record attention scores if avg_attn_scores is not None: if attn is None: attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2) attn_buf = attn.clone() nonpad_idxs = src_tokens.ne(self.pad) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(fw_lprobs) scores_buf = scores_buf.type_as(fw_lprobs) self.search.set_src_lengths(src_lengths_no_eos) if self.no_repeat_ngram_size > 0: def calculate_banned_tokens(bbsz_idx): # before decoding the next token, prevent decoding of ngrams that have already appeared ngram_index = 
tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist()) return gen_ngrams[bbsz_idx].get(ngram_index, []) if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)] else: banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step( step, fw_lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size), lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos (except for candidates to be ignored) eos_mask = cand_indices.eq(self.eos) eos_mask[:, :beam_size] &= ~cands_to_ignore # only consider eos when it's among the top beam_size indices eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = set() if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size] ) combined_noisy_channel_eos_scores = torch.masked_select( combined_noisy_channel_scores[:, :beam_size], mask=eos_mask[:, :beam_size], ) # finalize hypo using channel model score finalized_sents = finalize_hypos( step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = cand_indices.new_ones(bsz) batch_mask[cand_indices.new(finalized_sents)] = 0 batch_idxs = torch.nonzero(batch_mask).squeeze(-1) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs] fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths_no_eos = src_lengths_no_eos[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) scores_buf.resize_as_(scores) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens_buf.resize_as_(tokens) src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze() if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1) attn_buf.resize_as_(attn) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos or # ignored hypos and values < cand_size indicate candidate # active hypos. After this, the min values per row are the top # candidate active hypos. 
eos_mask[:, :beam_size] |= cands_to_ignore active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just the hypos # with the smallest values in active_mask active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore') torch.topk( active_mask, k=beam_size, dim=1, largest=False, out=(new_cands_to_ignore, active_hypos) ) # update cands_to_ignore to ignore any finalized hypos cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] assert (~cands_to_ignore).any(dim=1).all() active_bbsz_idx = buffer('active_bbsz_idx') torch.gather( cand_bbsz_idx, dim=1, index=active_hypos, out=active_bbsz_idx, ) active_scores = torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores[:, step].view(bsz, beam_size), ) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses torch.index_select( tokens[:, :step + 1], dim=0, index=active_bbsz_idx, out=tokens_buf[:, :step + 1], ) torch.gather( cand_indices, dim=1, index=active_hypos, out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1], ) if step > 0: torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx, out=scores_buf[:, :step], ) torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores_buf.view(bsz, beam_size, -1)[:, :, step], ) torch.gather( lm_lprobs_top_k, dim=1, index=active_hypos, out=lm_prefix_scores.view(bsz, beam_size) ) # copy attention for active hypotheses if attn is not None: torch.index_select( attn[:, :, :step + 2], dim=0, index=active_bbsz_idx, out=attn_buf[:, :, :step + 2], ) # swap buffers tokens, tokens_buf = tokens_buf, tokens scores, scores_buf = scores_buf, scores if attn is not None: attn, attn_buf = attn_buf, attn # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True) return finalized def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k): with torch.no_grad(): lm_lprobs, avg_attn_scores = model.forward_decoder( input_tokens, encoder_outs=None, incremental_states=incremental_states, ) lm_lprobs_size = lm_lprobs.size(0) probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1) return probs_next_wrd def make_dict2dict(old_dict, new_dict): dict2dict_map = {} for sym in old_dict.symbols: dict2dict_map[old_dict.index(sym)] = new_dict.index(sym) return dict2dict_map def dict2dict(tokens, dict2dict_map): if tokens.device == torch.device('cpu'): tokens_tmp = tokens else: tokens_tmp = tokens.cpu() return tokens_tmp.map_( tokens_tmp, lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)] ).to(tokens.device) def reorder_tokens(tokens, lengths, eos): # reorder source tokens so they may be used as reference for P(S|T) return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0) def reorder_all_tokens(tokens, lengths, eos): # used to reorder src tokens from [<pad> <w1> <w2> .. 
<eos>] to [<eos> <w1> <w2>...<pad>] # so source tokens can be used to predict P(S|T) return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)]) def normalized_scores_with_batch_vocab( model_decoder, features, target_ids, k, bsz, beam_size, pad_idx, top_k=0, vocab_size_meter=None, start_idx=None, end_idx=None, **kwargs): """ Get normalized probabilities (or log probs) from a net's output w.r.t. vocab consisting of target IDs in the batch """ if model_decoder.adaptive_softmax is None: weight = model_decoder.output_projection.weight vocab_ids = torch.unique( torch.cat( (torch.unique(target_ids), torch.arange(top_k, device=target_ids.device)) ) ) id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids)))) mapped_target_ids = target_ids.cpu().apply_( lambda x, id_map=id_map: id_map[x] ).to(target_ids.device) expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1) if start_idx is not None and end_idx is not None: expanded_target_ids = expanded_target_ids[start_idx:end_idx, :] logits = F.linear(features, weight[vocab_ids, :]) log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32) intermed_scores = torch.gather( log_softmax[:, :-1, :], 2, expanded_target_ids[:, 1:].unsqueeze(2), ).squeeze() not_padding = expanded_target_ids[:, 1:] != pad_idx intermed_scores *= not_padding.float() return intermed_scores else: raise ValueError("adaptive softmax doesn't work with " + "`normalized_scores_with_batch_vocab()`")
EXA-1-master
exa/models/unilm-master/edgelm/examples/fast_noisy_channel/noisy_channel_sequence_generator.py
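The scoring rule that combine_ch_lm applies above can be looked at in isolation. A minimal standalone sketch, with made-up log-probabilities, lengths, and weights (not the fairseq implementation itself), of the length-normalized combination 1/t log P(T|S) + 1/s (ch_weight * log P(S|T) + lm_weight * log P(T)):

import torch

def noisy_channel_score(fw_lprob, ch_lprob, lm_lprob, src_len, tgt_len,
                        ch_weight=1.0, lm_weight=1.0):
    # 1/t * log P(T|S) + 1/s * (ch_weight * log P(S|T) + lm_weight * log P(T)),
    # one of the normalization variants described in the comments above
    return fw_lprob / tgt_len + (ch_weight * ch_lprob + lm_weight * lm_lprob) / src_len

# toy values: all log-probabilities and lengths here are made up
fw = torch.tensor(-12.3)   # direct model log P(T|S)
ch = torch.tensor(-15.7)   # channel model log P(S|T)
lm = torch.tensor(-20.1)   # language model log P(T)
print(noisy_channel_score(fw, ch, lm, src_len=9, tgt_len=11))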
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.tasks.translation import TranslationTask from fairseq.tasks.language_modeling import LanguageModelingTask from fairseq import checkpoint_utils import argparse from fairseq.tasks import register_task import torch @register_task("noisy_channel_translation") class NoisyChannelTranslation(TranslationTask): """ Rescore the top k candidates from each beam using noisy channel modeling """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" TranslationTask.add_args(parser) # fmt: off parser.add_argument('--channel-model', metavar='FILE', help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.') parser.add_argument('--combine-method', default='lm_only', choices=['lm_only', 'noisy_channel'], help="""method for combining direct and channel model scores. lm_only: decode with P(T|S)P(T) noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""") parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False, help='normalize lm score by target length instead of source length') parser.add_argument('--channel-scoring-type', default='log_norm', choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'], help="Normalize bw scores with log softmax or return bw scores without log softmax") parser.add_argument('--top-k-vocab', default=0, type=int, help='top k vocab IDs to use with `src_vocab` in channel model scoring') parser.add_argument('--k2', default=50, type=int, help='the top k2 candidates to rescore with the noisy channel model for each beam') parser.add_argument('--ch-wt', default=1, type=float, help='weight for the channel model') parser.add_argument('--lm-model', metavar='FILE', help='path to lm model file, to model P(T). P(T) must share the same vocab as the direct model on the target side') parser.add_argument('--lm-data', metavar='FILE', help='path to lm model training data for target language, used to properly load LM with correct dictionary') parser.add_argument('--lm-wt', default=1, type=float, help='the weight of the lm in joint decoding') # fmt: on def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None ): if getattr(args, "score_reference", False): raise NotImplementedError() else: from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator use_cuda = torch.cuda.is_available() and not self.args.cpu assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!' 
assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs' if self.args.channel_model is not None: import copy ch_args_task = copy.deepcopy(self.args) tmp = ch_args_task.source_lang ch_args_task.source_lang = ch_args_task.target_lang ch_args_task.target_lang = tmp ch_args_task._name = 'translation' channel_task = TranslationTask.setup_task(ch_args_task) arg_dict = {} arg_dict['task'] = 'language_modeling' arg_dict['sample_break_mode'] = 'eos' arg_dict['data'] = self.args.lm_data arg_dict['output_dictionary_size'] = -1 lm_args = argparse.Namespace(**arg_dict) lm_task = LanguageModelingTask.setup_task(lm_args) lm_dict = lm_task.output_dictionary if self.args.channel_model is not None: channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task) for model in channel_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() else: channel_models = None lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task) for model in lm_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() return NoisyChannelSequenceGenerator( combine_method=self.args.combine_method, tgt_dict=self.target_dictionary, src_dict=self.source_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), normalize_scores=(not getattr(args, 'unnormalized', False)), channel_models=channel_models, k2=getattr(self.args, 'k2', 50), ch_weight=getattr(self.args, 'ch_wt', 1), channel_scoring_type=self.args.channel_scoring_type, top_k_vocab=self.args.top_k_vocab, lm_models=lm_models, lm_dict=lm_dict, lm_weight=getattr(self.args, 'lm_wt', 1), normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False), )
EXA-1-master
exa/models/unilm-master/edgelm/examples/fast_noisy_channel/noisy_channel_translation.py
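The channel model P(S|T) is configured by deep-copying the direct task's arguments and swapping source and target languages. A tiny hypothetical sketch of that direction swap using a plain dataclass (PairConfig is made up for illustration, not a fairseq type):

from dataclasses import dataclass, replace

@dataclass
class PairConfig:
    source_lang: str
    target_lang: str

def channel_config(direct: PairConfig) -> PairConfig:
    # the channel model P(S|T) runs in the reverse direction of the direct model P(T|S)
    return replace(direct, source_lang=direct.target_lang, target_lang=direct.source_lang)

print(channel_config(PairConfig("de", "en")))  # PairConfig(source_lang='en', target_lang='de')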
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import os.path as op from collections import namedtuple from multiprocessing import cpu_count from typing import List, Optional import sentencepiece as sp from fairseq.data.encoders.byte_bpe import ByteBPE from fairseq.data.encoders.byte_utils import byte_encode from fairseq.data.encoders.bytes import Bytes from fairseq.data.encoders.characters import Characters from fairseq.data.encoders.moses_tokenizer import MosesTokenizer from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE SPLITS = ["train", "valid", "test"] def _convert_xml(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: ss = s.strip() if not ss.startswith("<seg"): continue ss = ss.replace("</seg>", "").split('">') assert len(ss) == 2 f_o.write(ss[1].strip() + "\n") def _convert_train(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: ss = s.strip() if ss.startswith("<"): continue f_o.write(ss.strip() + "\n") def _get_bytes(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(Bytes.encode(s.strip()) + "\n") def _get_chars(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(Characters.encode(s.strip()) + "\n") def pretokenize(in_path: str, out_path: str, src: str, tgt: str): Args = namedtuple( "Args", [ "moses_source_lang", "moses_target_lang", "moses_no_dash_splits", "moses_no_escape", ], ) args = Args( moses_source_lang=src, moses_target_lang=tgt, moses_no_dash_splits=False, moses_no_escape=False, ) pretokenizer = MosesTokenizer(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(pretokenizer.encode(s.strip()) + "\n") def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str): with open(out_path, "w") as f_o: for lang in [src, tgt]: with open(f"{in_path_prefix}.{lang}") as f: for s in f: f_o.write(byte_encode(s.strip()) + "\n") def _get_bpe(in_path: str, model_prefix: str, vocab_size: int): arguments = [ f"--input={in_path}", f"--model_prefix={model_prefix}", f"--model_type=bpe", f"--vocab_size={vocab_size}", "--character_coverage=1.0", "--normalization_rule_name=identity", f"--num_threads={cpu_count()}", ] sp.SentencePieceTrainer.Train(" ".join(arguments)) def _apply_bbpe(model_path: str, in_path: str, out_path: str): Args = namedtuple("Args", ["sentencepiece_model_path"]) args = Args(sentencepiece_model_path=model_path) tokenizer = ByteBPE(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + "\n") def _apply_bpe(model_path: str, in_path: str, out_path: str): Args = namedtuple("Args", ["sentencepiece_model"]) args = Args(sentencepiece_model=model_path) tokenizer = SentencepieceBPE(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + "\n") def _concat_files(in_paths: List[str], out_path: str): with open(out_path, "w") as f_o: for p in in_paths: with open(p) as f: for r in f: f_o.write(r) def preprocess_iwslt17( root: str, src: str, tgt: str, bpe_size: Optional[int], need_chars: bool, bbpe_size: Optional[int], need_bytes: bool, ): # extract bitext in_root = op.join(root, f"{src}-{tgt}") for lang in [src, tgt]: _convert_train( op.join(in_root, 
f"train.tags.{src}-{tgt}.{lang}"), op.join(root, f"train.{lang}"), ) _convert_xml( op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"), op.join(root, f"valid.{lang}"), ) _convert_xml( op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"), op.join(root, f"test.{lang}"), ) # pre-tokenize for lang in [src, tgt]: for split in SPLITS: pretokenize( op.join(root, f"{split}.{lang}"), op.join(root, f"{split}.moses.{lang}"), src, tgt, ) # tokenize with BPE vocabulary if bpe_size is not None: # learn vocabulary concated_train_path = op.join(root, "train.all") _concat_files( [op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")], concated_train_path, ) bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}") _get_bpe(concated_train_path, bpe_model_prefix, bpe_size) os.remove(concated_train_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bpe( bpe_model_prefix + ".model", op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"), ) # tokenize with bytes vocabulary if need_bytes: for lang in [src, tgt]: for split in SPLITS: _get_bytes( op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bytes.{lang}"), ) # tokenize with characters vocabulary if need_chars: for lang in [src, tgt]: for split in SPLITS: _get_chars( op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.chars.{lang}"), ) # tokenize with byte-level BPE vocabulary if bbpe_size is not None: # learn vocabulary bchar_path = op.join(root, "train.bchar") _convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path) bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}") _get_bpe(bchar_path, bbpe_model_prefix, bbpe_size) os.remove(bchar_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bbpe( bbpe_model_prefix + ".model", op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"), ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default="data") parser.add_argument( "--bpe-vocab", default=None, type=int, help="Generate tokenized bitext with BPE of size K." "Default to None (disabled).", ) parser.add_argument( "--bbpe-vocab", default=None, type=int, help="Generate tokenized bitext with BBPE of size K." "Default to None (disabled).", ) parser.add_argument( "--byte-vocab", action="store_true", help="Generate tokenized bitext with bytes vocabulary", ) parser.add_argument( "--char-vocab", action="store_true", help="Generate tokenized bitext with chars vocabulary", ) args = parser.parse_args() preprocess_iwslt17( args.root, "fr", "en", args.bpe_vocab, args.char_vocab, args.bbpe_vocab, args.byte_vocab, ) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/examples/byte_level_bpe/get_bitext.py
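_get_bpe and _apply_bpe are thin wrappers around the sentencepiece API. A minimal sketch of the same train-then-apply flow, assuming a plain-text file train.moses.en exists and using a made-up vocabulary size:

import sentencepiece as spm

# train a small BPE model on a plain-text file (input path and vocab size are made up)
spm.SentencePieceTrainer.Train(
    "--input=train.moses.en --model_prefix=spm_bpe2000 "
    "--model_type=bpe --vocab_size=2000 "
    "--character_coverage=1.0 --normalization_rule_name=identity"
)

# apply it directly with the sentencepiece API, analogous to what _apply_bpe does
# through fairseq's SentencepieceBPE wrapper
sp = spm.SentencePieceProcessor()
sp.Load("spm_bpe2000.model")
print(" ".join(sp.EncodeAsPieces("Hello world !")))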
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerEncoder, TransformerModel @register_model("gru_transformer") class GRUTransformerModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return GRUTransformerEncoder(args, src_dict, embed_tokens) class GRUTransformerEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.emb_ctx = nn.GRU( input_size=embed_tokens.embedding_dim, hidden_size=embed_tokens.embedding_dim // 2, num_layers=1, bidirectional=True, ) def forward_embedding(self, src_tokens): # embed tokens and positions x = embed = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) # contextualize embeddings x = x.transpose(0, 1) x = self.dropout_module(x) x, _ = self.emb_ctx.forward(x) x = x.transpose(0, 1) if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) return x, embed @register_model_architecture("gru_transformer", "gru_transformer") def gru_transformer_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.layer_wise_attention = getattr(args, 
"layer_wise_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) @register_model_architecture("gru_transformer", "gru_transformer_big") def gru_transformer_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) gru_transformer_base_architecture(args)
EXA-1-master
exa/models/unilm-master/edgelm/examples/byte_level_bpe/gru_transformer.py
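The emb_ctx module preserves the embedding dimension because each GRU direction produces embed_dim // 2 features and the two directions are concatenated. A small shape check illustrating that, with made-up sizes:

import torch
import torch.nn as nn

embed_dim, seq_len, batch = 512, 7, 2
emb_ctx = nn.GRU(input_size=embed_dim, hidden_size=embed_dim // 2,
                 num_layers=1, bidirectional=True)

x = torch.randn(seq_len, batch, embed_dim)  # GRU expects (T, B, C)
out, _ = emb_ctx(x)
print(out.shape)  # torch.Size([7, 2, 512]): both directions concatenated back to embed_dim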
#!/usr/bin/env python """Helper script to compare two argparse.Namespace objects.""" from argparse import Namespace # noqa def main(): ns1 = eval(input("Namespace 1: ")) ns2 = eval(input("Namespace 2: ")) def keys(ns): ks = set() for k in dir(ns): if not k.startswith("_"): ks.add(k) return ks k1 = keys(ns1) k2 = keys(ns2) def print_keys(ks, ns1, ns2=None): for k in ks: if ns2 is None: print("{}\t{}".format(k, getattr(ns1, k, None))) else: print( "{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None)) ) print("Keys unique to namespace 1:") print_keys(k1 - k2, ns1) print() print("Keys unique to namespace 2:") print_keys(k2 - k1, ns2) print() print("Overlapping keys with different values:") ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")] print_keys(ks, ns1, ns2) print() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/compare_namespaces.py
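A small self-contained example of the same key diff the script performs, using two made-up Namespace objects instead of the eval(input(...)) prompts:

from argparse import Namespace

ns1 = Namespace(lr=0.001, dropout=0.1, arch="transformer")
ns2 = Namespace(lr=0.0005, dropout=0.1, warmup=4000)

k1 = {k for k in dir(ns1) if not k.startswith("_")}
k2 = {k for k in dir(ns2) if not k.startswith("_")}
print("only in ns1:", k1 - k2)   # {'arch'}
print("only in ns2:", k2 - k1)   # {'warmup'}
print("differing:", {k for k in k1 & k2 if getattr(ns1, k) != getattr(ns2, k)})  # {'lr'}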
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Split a large file into a train and valid set while respecting document boundaries. Documents should be separated by a single empty line. """ import argparse import random import sys def main(): parser = argparse.ArgumentParser() parser.add_argument("input") parser.add_argument("sample_output", help="train output file") parser.add_argument("remainder_output", help="valid output file") parser.add_argument("-k", type=int, help="remainder size") parser.add_argument( "--lines", action="store_true", help="split lines instead of docs" ) args = parser.parse_args() assert args.k is not None sample = [] remainder = [] num_docs = [0] def update_sample(doc): if len(sample) < args.k: sample.append(doc.copy()) else: i = num_docs[0] j = random.randrange(i + 1) if j < args.k: remainder.append(sample[j]) sample[j] = doc.copy() else: remainder.append(doc.copy()) num_docs[0] += 1 doc.clear() with open(args.input, "r", encoding="utf-8") as h: doc = [] for i, line in enumerate(h): if line.strip() == "": # empty line indicates new document update_sample(doc) else: doc.append(line) if args.lines: update_sample(doc) if i % 1000000 == 0: print(i, file=sys.stderr, end="", flush=True) elif i % 100000 == 0: print(".", file=sys.stderr, end="", flush=True) if len(doc) > 0: update_sample(doc) print(file=sys.stderr, flush=True) assert len(sample) == args.k with open(args.sample_output, "w", encoding="utf-8") as out: first = True for doc in sample: if not first and not args.lines: out.write("\n") first = False for line in doc: out.write(line) with open(args.remainder_output, "w", encoding="utf-8") as out: first = True for doc in remainder: if not first and not args.lines: out.write("\n") first = False for line in doc: out.write(line) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/split_train_valid_docs.py
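update_sample implements reservoir sampling: every document ends up in the k-element sample with equal probability, and everything else goes to the remainder. A standalone sketch of the same scheme with a made-up integer stream:

import random

def reservoir_sample(stream, k, seed=0):
    # keep a uniform random sample of k items from a stream of unknown length
    rng = random.Random(seed)
    sample, rest = [], []
    for i, item in enumerate(stream):
        if len(sample) < k:
            sample.append(item)
        else:
            j = rng.randrange(i + 1)
            if j < k:
                rest.append(sample[j])
                sample[j] = item
            else:
                rest.append(item)
    return sample, rest

sample, remainder = reservoir_sample(range(100), k=5)
print(sample, len(remainder))  # 5 sampled items, 95 items in the remainder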
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Use this script in order to build symmetric alignments for your translation dataset. This script depends on fast_align and mosesdecoder tools. You will need to build those before running the script. fast_align: github: http://github.com/clab/fast_align instructions: follow the instructions in README.md mosesdecoder: github: http://github.com/moses-smt/mosesdecoder instructions: http://www.statmt.org/moses/?n=Development.GetStarted The script produces the following files under --output_dir: text.joined - concatenation of lines from the source_file and the target_file. align.forward - forward pass of fast_align. align.backward - backward pass of fast_align. aligned.sym_heuristic - symmetrized alignment. """ import argparse import os from itertools import zip_longest def main(): parser = argparse.ArgumentParser(description="symmetric alignment builer") # fmt: off parser.add_argument('--fast_align_dir', help='path to fast_align build directory') parser.add_argument('--mosesdecoder_dir', help='path to mosesdecoder root directory') parser.add_argument('--sym_heuristic', help='heuristic to use for symmetrization', default='grow-diag-final-and') parser.add_argument('--source_file', help='path to a file with sentences ' 'in the source language') parser.add_argument('--target_file', help='path to a file with sentences ' 'in the target language') parser.add_argument('--output_dir', help='output directory') # fmt: on args = parser.parse_args() fast_align_bin = os.path.join(args.fast_align_dir, "fast_align") symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal") sym_fast_align_bin = os.path.join( args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl" ) # create joined file joined_file = os.path.join(args.output_dir, "text.joined") with open(args.source_file, "r", encoding="utf-8") as src, open( args.target_file, "r", encoding="utf-8" ) as tgt: with open(joined_file, "w", encoding="utf-8") as joined: for s, t in zip_longest(src, tgt): print("{} ||| {}".format(s.strip(), t.strip()), file=joined) bwd_align_file = os.path.join(args.output_dir, "align.backward") # run forward alignment fwd_align_file = os.path.join(args.output_dir, "align.forward") fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format( FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file ) assert os.system(fwd_fast_align_cmd) == 0 # run backward alignment bwd_align_file = os.path.join(args.output_dir, "align.backward") bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format( FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file ) assert os.system(bwd_fast_align_cmd) == 0 # run symmetrization sym_out_file = os.path.join(args.output_dir, "aligned") sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format( SYMFASTALIGN=sym_fast_align_bin, FWD=fwd_align_file, BWD=bwd_align_file, SRC=args.source_file, TGT=args.target_file, OUT=sym_out_file, HEURISTIC=args.sym_heuristic, SYMAL=symal_bin, ) assert os.system(sym_cmd) == 0 if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/build_sym_alignment.py
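fast_align consumes the bitext as one "source ||| target" pair per line, which is exactly what the joined file above contains. A minimal sketch of producing that format from two made-up sentence lists:

from itertools import zip_longest

src_lines = ["das ist ein test", "hallo welt"]
tgt_lines = ["this is a test", "hello world"]

# one "source ||| target" pair per line; fillvalue guards against unequal lengths
with open("text.joined", "w", encoding="utf-8") as joined:
    for s, t in zip_longest(src_lines, tgt_lines, fillvalue=""):
        print("{} ||| {}".format(s.strip(), t.strip()), file=joined)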
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import argparse import sentencepiece as spm def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model", required=True, help="sentencepiece model to use for decoding" ) parser.add_argument("--input", required=True, help="input file to decode") parser.add_argument("--input_format", choices=["piece", "id"], default="piece") args = parser.parse_args() sp = spm.SentencePieceProcessor() sp.Load(args.model) if args.input_format == "piece": def decode(l): return "".join(sp.DecodePieces(l)) elif args.input_format == "id": def decode(l): return "".join(sp.DecodeIds(l)) else: raise NotImplementedError def tok2int(tok): # remap reference-side <unk> (represented as <<unk>>) to 0 return int(tok) if tok != "<<unk>>" else 0 with open(args.input, "r", encoding="utf-8") as h: for line in h: if args.input_format == "id": print(decode(list(map(tok2int, line.rstrip().split())))) elif args.input_format == "piece": print(decode(line.rstrip().split())) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/spm_decode.py
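A short round-trip example of the two input formats the script accepts, assuming a trained model file at a made-up path:

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spm_bpe2000.model")  # hypothetical model path

pieces = sp.EncodeAsPieces("Hello world !")   # --input_format=piece
ids = sp.EncodeAsIds("Hello world !")         # --input_format=id
print(sp.DecodePieces(pieces))  # piece-format round trip
print(sp.DecodeIds(ids))        # id-format round trip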
EXA-1-master
exa/models/unilm-master/edgelm/scripts/__init__.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import re import shutil import sys pt_regexp = re.compile(r"checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt") pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt") pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt") def parse_checkpoints(files): entries = [] for f in files: m = pt_regexp_epoch_based.fullmatch(f) if m is not None: entries.append((int(m.group(1)), m.group(0))) else: m = pt_regexp_update_based.fullmatch(f) if m is not None: entries.append((int(m.group(1)), m.group(0))) return entries def last_n_checkpoints(files, n): entries = parse_checkpoints(files) return [x[1] for x in sorted(entries, reverse=True)[:n]] def every_n_checkpoints(files, n): entries = parse_checkpoints(files) return [x[1] for x in sorted(sorted(entries)[::-n])] def main(): parser = argparse.ArgumentParser( description=( "Recursively delete checkpoint files from `root_dir`, " "but preserve checkpoint_best.pt and checkpoint_last.pt" ) ) parser.add_argument("root_dirs", nargs="*") parser.add_argument( "--save-last", type=int, default=0, help="number of last checkpoints to save" ) parser.add_argument( "--save-every", type=int, default=0, help="interval of checkpoints to save" ) parser.add_argument( "--preserve-test", action="store_true", help="preserve checkpoints in dirs that start with test_ prefix (default: delete them)", ) parser.add_argument( "--delete-best", action="store_true", help="delete checkpoint_best.pt" ) parser.add_argument( "--delete-last", action="store_true", help="delete checkpoint_last.pt" ) parser.add_argument( "--no-dereference", action="store_true", help="don't dereference symlinks" ) args = parser.parse_args() files_to_desymlink = [] files_to_preserve = [] files_to_delete = [] for root_dir in args.root_dirs: for root, _subdirs, files in os.walk(root_dir): if args.save_last > 0: to_save = last_n_checkpoints(files, args.save_last) else: to_save = [] if args.save_every > 0: to_save += every_n_checkpoints(files, args.save_every) for file in files: if not pt_regexp.fullmatch(file): continue full_path = os.path.join(root, file) if ( not os.path.basename(root).startswith("test_") or args.preserve_test ) and ( (file == "checkpoint_last.pt" and not args.delete_last) or (file == "checkpoint_best.pt" and not args.delete_best) or file in to_save ): if os.path.islink(full_path) and not args.no_dereference: files_to_desymlink.append(full_path) else: files_to_preserve.append(full_path) else: files_to_delete.append(full_path) if len(files_to_desymlink) == 0 and len(files_to_delete) == 0: print("Nothing to do.") sys.exit(0) files_to_desymlink = sorted(files_to_desymlink) files_to_preserve = sorted(files_to_preserve) files_to_delete = sorted(files_to_delete) print("Operations to perform (in order):") if len(files_to_desymlink) > 0: for file in files_to_desymlink: print(" - preserve (and dereference symlink): " + file) if len(files_to_preserve) > 0: for file in files_to_preserve: print(" - preserve: " + file) if len(files_to_delete) > 0: for file in files_to_delete: print(" - delete: " + file) while True: resp = input("Continue? 
(Y/N): ") if resp.strip().lower() == "y": break elif resp.strip().lower() == "n": sys.exit(0) print("Executing...") if len(files_to_desymlink) > 0: for file in files_to_desymlink: realpath = os.path.realpath(file) print("rm " + file) os.remove(file) print("cp {} {}".format(realpath, file)) shutil.copyfile(realpath, file) if len(files_to_delete) > 0: for file in files_to_delete: print("rm " + file) os.remove(file) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/rm_pt.py
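Whether a file counts as an epoch-based or update-based checkpoint is decided purely by the two regular expressions above. A small standalone check with made-up file names:

import re

pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt")
pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt")

files = ["checkpoint3.pt", "checkpoint_3_12000.pt", "checkpoint_best.pt", "model.bin"]
for f in files:
    m = pt_regexp_epoch_based.fullmatch(f) or pt_regexp_update_based.fullmatch(f)
    print(f, "->", int(m.group(1)) if m else "not an epoch/update checkpoint")
# checkpoint3.pt -> 3
# checkpoint_3_12000.pt -> 12000
# checkpoint_best.pt -> not an epoch/update checkpoint
# model.bin -> not an epoch/update checkpoint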
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Count the number of documents and average number of lines and tokens per document in a large file. Documents should be separated by a single empty line. """ import argparse import gzip import sys import numpy as np def main(): parser = argparse.ArgumentParser() parser.add_argument("input") parser.add_argument("--gzip", action="store_true") args = parser.parse_args() def gopen(): if args.gzip: return gzip.open(args.input, "r") else: return open(args.input, "r", encoding="utf-8") num_lines = [] num_toks = [] with gopen() as h: num_docs = 1 num_lines_in_doc = 0 num_toks_in_doc = 0 for i, line in enumerate(h): if len(line.strip()) == 0: # empty line indicates new document num_docs += 1 num_lines.append(num_lines_in_doc) num_toks.append(num_toks_in_doc) num_lines_in_doc = 0 num_toks_in_doc = 0 else: num_lines_in_doc += 1 num_toks_in_doc += len(line.rstrip().split()) if i % 1000000 == 0: print(i, file=sys.stderr, end="", flush=True) elif i % 100000 == 0: print(".", file=sys.stderr, end="", flush=True) print(file=sys.stderr, flush=True) print("found {} docs".format(num_docs)) print("average num lines per doc: {}".format(np.mean(num_lines))) print("average num toks per doc: {}".format(np.mean(num_toks))) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/count_docs.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import argparse import contextlib import sys import sentencepiece as spm def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model", required=True, help="sentencepiece model to use for encoding" ) parser.add_argument( "--inputs", nargs="+", default=["-"], help="input files to filter/encode" ) parser.add_argument( "--outputs", nargs="+", default=["-"], help="path to save encoded outputs" ) parser.add_argument("--output_format", choices=["piece", "id"], default="piece") parser.add_argument( "--min-len", type=int, metavar="N", help="filter sentence pairs with fewer than N tokens", ) parser.add_argument( "--max-len", type=int, metavar="N", help="filter sentence pairs with more than N tokens", ) args = parser.parse_args() assert len(args.inputs) == len( args.outputs ), "number of input and output paths should match" sp = spm.SentencePieceProcessor() sp.Load(args.model) if args.output_format == "piece": def encode(l): return sp.EncodeAsPieces(l) elif args.output_format == "id": def encode(l): return list(map(str, sp.EncodeAsIds(l))) else: raise NotImplementedError if args.min_len is not None or args.max_len is not None: def valid(line): return (args.min_len is None or len(line) >= args.min_len) and ( args.max_len is None or len(line) <= args.max_len ) else: def valid(lines): return True with contextlib.ExitStack() as stack: inputs = [ stack.enter_context(open(input, "r", encoding="utf-8")) if input != "-" else sys.stdin for input in args.inputs ] outputs = [ stack.enter_context(open(output, "w", encoding="utf-8")) if output != "-" else sys.stdout for output in args.outputs ] stats = { "num_empty": 0, "num_filtered": 0, } def encode_line(line): line = line.strip() if len(line) > 0: line = encode(line) if valid(line): return line else: stats["num_filtered"] += 1 else: stats["num_empty"] += 1 return None for i, lines in enumerate(zip(*inputs), start=1): enc_lines = list(map(encode_line, lines)) if not any(enc_line is None for enc_line in enc_lines): for enc_line, output_h in zip(enc_lines, outputs): print(" ".join(enc_line), file=output_h) if i % 10000 == 0: print("processed {} lines".format(i), file=sys.stderr) print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr) print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/spm_encode.py
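A compact sketch of what one encoded line looks like in the two output formats, together with a length filter in the spirit of --min-len/--max-len, assuming a model at a made-up path:

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spm_bpe2000.model")  # hypothetical model path

line = "Hello world !"
pieces = sp.EncodeAsPieces(line)            # --output_format=piece
ids = list(map(str, sp.EncodeAsIds(line)))  # --output_format=id

min_len, max_len = 1, 256  # made-up filter bounds
if min_len <= len(pieces) <= max_len:
    print(" ".join(pieces))
    print(" ".join(ids))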
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Split a large file into shards while respecting document boundaries. Documents should be separated by a single empty line. """ import argparse import contextlib def main(): parser = argparse.ArgumentParser() parser.add_argument("input") parser.add_argument("--num-shards", type=int) args = parser.parse_args() assert args.num_shards is not None and args.num_shards > 1 with open(args.input, "r", encoding="utf-8") as h: with contextlib.ExitStack() as stack: outputs = [ stack.enter_context( open(args.input + ".shard" + str(i), "w", encoding="utf-8") ) for i in range(args.num_shards) ] doc = [] first_doc = [True] * args.num_shards def output_doc(i): if not first_doc[i]: outputs[i].write("\n") first_doc[i] = False for line in doc: outputs[i].write(line) doc.clear() num_docs = 0 for line in h: if line.strip() == "": # empty line indicates new document output_doc(num_docs % args.num_shards) num_docs += 1 else: doc.append(line) output_doc(num_docs % args.num_shards) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/shard_docs.py
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

import sys

import sentencepiece as spm


if __name__ == "__main__":
    spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
EXA-1-master
exa/models/unilm-master/edgelm/scripts/spm_train.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import collections import os import re import torch from fairseq.file_io import PathManager def average_checkpoints(inputs): """Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() params_keys = None new_state = None num_models = len(inputs) for fpath in inputs: with PathManager.open(fpath, "rb") as f: state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, "cpu") ), ) # Copies over the settings from the first checkpoint if new_state is None: new_state = state model_params = state["model"] model_params_keys = list(model_params.keys()) if params_keys is None: params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( "For checkpoint {}, expected list of params: {}, " "but found: {}".format(f, params_keys, model_params_keys) ) for k in params_keys: p = model_params[k] if isinstance(p, torch.HalfTensor): p = p.float() if k not in params_dict: params_dict[k] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: params_dict[k] += p averaged_params = collections.OrderedDict() for k, v in params_dict.items(): averaged_params[k] = v if averaged_params[k].is_floating_point(): averaged_params[k].div_(num_models) else: averaged_params[k] //= num_models new_state["model"] = averaged_params return new_state def last_n_checkpoints(paths, n, update_based, upper_bound=None): assert len(paths) == 1 path = paths[0] if update_based: pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt") else: pt_regexp = re.compile(r"checkpoint(\d+)\.pt") files = PathManager.ls(path) entries = [] for f in files: m = pt_regexp.fullmatch(f) if m is not None: sort_key = int(m.group(1)) if upper_bound is None or sort_key <= upper_bound: entries.append((sort_key, m.group(0))) if len(entries) < n: raise Exception( "Found {} checkpoint files but need at least {}", len(entries), n ) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] def main(): parser = argparse.ArgumentParser( description="Tool to average the params of input checkpoints to " "produce a new checkpoint", ) # fmt: off parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.') parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.') num_group = parser.add_mutually_exclusive_group() num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, ' 'and average last this many of them.') num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, ' 'and average last this many of them.') parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, ' 'when using --num-update-checkpoints, this will set an upper bound on which 
update to use' 'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.' 'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500' ) # fmt: on args = parser.parse_args() print(args) num = None is_update_based = False if args.num_update_checkpoints is not None: num = args.num_update_checkpoints is_update_based = True elif args.num_epoch_checkpoints is not None: num = args.num_epoch_checkpoints assert args.checkpoint_upper_bound is None or ( args.num_epoch_checkpoints is not None or args.num_update_checkpoints is not None ), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints" assert ( args.num_epoch_checkpoints is None or args.num_update_checkpoints is None ), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints" if num is not None: args.inputs = last_n_checkpoints( args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound, ) print("averaging checkpoints: ", args.inputs) new_state = average_checkpoints(args.inputs) with PathManager.open(args.output, "wb") as f: torch.save(new_state, f) print("Finished writing averaged checkpoint to {}".format(args.output)) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/average_checkpoints.py
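The core of average_checkpoints is summing identically named parameters across checkpoints and dividing by their count. A toy sketch with two made-up single-tensor state dicts:

import collections
import torch

# two toy "checkpoints" with identical parameter names
ckpts = [
    {"model": collections.OrderedDict(w=torch.tensor([1.0, 3.0]))},
    {"model": collections.OrderedDict(w=torch.tensor([3.0, 5.0]))},
]

avg = collections.OrderedDict()
for state in ckpts:
    for k, p in state["model"].items():
        p = p.float()
        avg[k] = avg[k] + p if k in avg else p.clone()  # clone in case of shared parameters
for k in avg:
    avg[k] = avg[k] / len(ckpts)

print(avg["w"])  # tensor([2., 4.])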
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from fairseq.data import Dictionary, data_utils, indexed_dataset def get_parser(): parser = argparse.ArgumentParser( description="writes text from binarized file to stdout" ) # fmt: off parser.add_argument('--dataset-impl', help='dataset implementation', choices=indexed_dataset.get_available_dataset_impl()) parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None) parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read') # fmt: on return parser def main(): parser = get_parser() args = parser.parse_args() dictionary = Dictionary.load(args.dict) if args.dict is not None else None dataset = data_utils.load_indexed_dataset( args.input, dictionary, dataset_impl=args.dataset_impl, default="lazy", ) for tensor_line in dataset: if dictionary is None: line = " ".join([str(int(x)) for x in tensor_line]) else: line = dictionary.string(tensor_line) print(line) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/edgelm/scripts/read_binarized.py
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys

"""Reads in a fairseq output file, and verifies that the constraints
(C- lines) are present in the output (the first H- line). Assumes that
constraints are listed prior to the first hypothesis.
"""

constraints = []
found = 0
total = 0
for line in sys.stdin:
    if line.startswith("C-"):
        constraints.append(line.rstrip().split("\t")[1])
    elif line.startswith("H-"):
        text = line.split("\t")[2]

        for constraint in constraints:
            total += 1
            if constraint in text:
                found += 1
            else:
                print(f"No {constraint} in {text}", file=sys.stderr)

        constraints = []

print(f"Found {found} / {total} = {100 * found / total:.1f}%")
EXA-1-master
exa/models/unilm-master/edgelm/scripts/constraints/validate.py
#!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Extracts random constraints from reference files.""" import argparse import random import sys from sacrebleu import extract_ngrams def get_phrase(words, index, length): assert index < len(words) - length + 1 phr = " ".join(words[index : index + length]) for i in range(index, index + length): words.pop(index) return phr def main(args): if args.seed: random.seed(args.seed) for line in sys.stdin: constraints = [] def add_constraint(constraint): constraints.append(constraint) source = line.rstrip() if "\t" in line: source, target = line.split("\t") if args.add_sos: target = f"<s> {target}" if args.add_eos: target = f"{target} </s>" if len(target.split()) >= args.len: words = [target] num = args.number choices = {} for i in range(num): if len(words) == 0: break segmentno = random.choice(range(len(words))) segment = words.pop(segmentno) tokens = segment.split() phrase_index = random.choice(range(len(tokens))) choice = " ".join( tokens[phrase_index : min(len(tokens), phrase_index + args.len)] ) for j in range( phrase_index, min(len(tokens), phrase_index + args.len) ): tokens.pop(phrase_index) if phrase_index > 0: words.append(" ".join(tokens[0:phrase_index])) if phrase_index + 1 < len(tokens): words.append(" ".join(tokens[phrase_index:])) choices[target.find(choice)] = choice # mask out with spaces target = target.replace(choice, " " * len(choice), 1) for key in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep="\t") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases") parser.add_argument("--len", "-l", type=int, default=1, help="phrase length") parser.add_argument( "--add-sos", default=False, action="store_true", help="add <s> token" ) parser.add_argument( "--add-eos", default=False, action="store_true", help="add </s> token" ) parser.add_argument("--seed", "-s", default=0, type=int) args = parser.parse_args() main(args)
EXA-1-master
exa/models/unilm-master/edgelm/scripts/constraints/extract.py
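A simplified sketch of drawing one random constraint phrase of a fixed length from a sentence, in the spirit of the sampling above (the original can also return shorter phrases at sentence boundaries; this version always takes a full-length span):

import random

def random_phrase(sentence, length, seed=0):
    # pick one contiguous span of `length` tokens uniformly at random
    rng = random.Random(seed)
    tokens = sentence.split()
    start = rng.choice(range(max(1, len(tokens) - length + 1)))
    return " ".join(tokens[start:start + length])

print(random_phrase("the quick brown fox jumps over the lazy dog", length=3))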
# -------------------------------------------------------- # Image as a Foreign Language: BEiT Pretraining for Vision and Vision-Language Tasks (https://arxiv.org/abs/2208.10442) # Github source: https://github.com/microsoft/unilm/tree/master/beit3 # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------' import math import sys import json from typing import Iterable, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.utils import ModelEma from timm.utils import accuracy, ModelEma from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from datasets import get_sentencepiece_model_for_beit3 import utils class TaskHandler(object): def __init__(self) -> None: self.metric_logger = None self.split = None def train_batch(self, model, **kwargs): raise NotImplementedError() def eval_batch(self, model, **kwargs): raise NotImplementedError() def before_eval(self, metric_logger, data_loader, **kwargs): self.metric_logger = metric_logger self.split = data_loader.dataset.split def after_eval(self, **kwargs): raise NotImplementedError() class NLVR2Handler(TaskHandler): def __init__(self) -> None: super().__init__() self.criterion = torch.nn.CrossEntropyLoss() def train_batch(self, model, image, image2, language_tokens, padding_mask, label): logits = model( image_a=image, image_b=image2, text_description=language_tokens, padding_mask=padding_mask) acc = (logits.max(-1)[-1] == label).float().mean() return { "loss": self.criterion(input=logits, target=label), "acc": acc, } def eval_batch(self, model, image, image2, language_tokens, padding_mask, label): logits = model( image_a=image, image_b=image2, text_description=language_tokens, padding_mask=padding_mask) batch_size = language_tokens.shape[0] acc = (logits.max(-1)[-1] == label).float().sum(0) * 100.0 / batch_size self.metric_logger.meters['acc'].update(acc.item(), n=batch_size) def after_eval(self, **kwargs): print('* Acc {acc.global_avg:.3f}'.format(acc=self.metric_logger.acc)) return {k: meter.global_avg for k, meter in self.metric_logger.meters.items()}, "acc" class ImageNetHandler(TaskHandler): def __init__(self, args) -> None: super().__init__() mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None if mixup_active: # smoothing is handled with mixup label transform self.criterion = SoftTargetCrossEntropy() elif args.label_smoothing > 0.: self.criterion = LabelSmoothingCrossEntropy(smoothing=args.label_smoothing) else: self.criterion = torch.nn.CrossEntropyLoss() def train_batch(self, model, image, label): logits = model(image=image) return { "loss": self.criterion(logits, label), } def eval_batch(self, model, image, label): logits = model(image=image) batch_size = image.shape[0] acc1, acc5 = accuracy(logits, label, topk=(1, 5)) self.metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) self.metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) def after_eval(self, **kwargs): print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}' .format(top1=self.metric_logger.acc1, top5=self.metric_logger.acc5)) return {k: meter.global_avg for k, meter in self.metric_logger.meters.items()}, "acc1" class RetrievalHandler(TaskHandler): def __init__(self) -> None: super().__init__() self.image_feats = [] self.text_feats = [] self.image_ids = [] self.metric_logger = None def train_batch(self, model, image, language_tokens, padding_mask, image_id): loss, vision_cls, language_cls = model( image=image, text_description=language_tokens, padding_mask=padding_mask) return { "loss": loss, } def before_eval(self, metric_logger, **kwargs): self.image_feats.clear() self.text_feats.clear() self.image_ids.clear() self.metric_logger = metric_logger def eval_batch(self, model, image, language_tokens, padding_mask, image_id): vision_cls, _ = model(image=image, only_infer=True) _, language_cls = model( text_description=language_tokens, padding_mask=padding_mask, only_infer=True) self.image_feats.append(vision_cls.clone()) self.text_feats.append(language_cls.clone()) self.image_ids.append(image_id.clone()) def after_eval(self, **kwargs): image_feats = {} for feats, ids in zip(self.image_feats, self.image_ids): for i, _idx in enumerate(ids): idx = _idx.item() if idx not in image_feats: image_feats[idx] = feats[i] tiids = torch.cat(self.image_ids, dim=0) iids = [] sorted_tensors = [] for key in sorted(image_feats.keys()): sorted_tensors.append(image_feats[key].view(1, -1)) iids.append(key) image_cls_feats = torch.cat(sorted_tensors, dim=0) text_cls_feats = torch.cat(self.text_feats, dim=0) scores = image_cls_feats @ text_cls_feats.t() iids = torch.LongTensor(iids).to(scores.device) print("scores: {}".format(scores.size())) print("iids: {}".format(iids.size())) print("tiids: {}".format(tiids.size())) topk10 = scores.topk(10, dim=1) topk5 = scores.topk(5, dim=1) topk1 = scores.topk(1, dim=1) topk10_iids = tiids[topk10.indices] topk5_iids = tiids[topk5.indices] topk1_iids = tiids[topk1.indices] tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean() tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean() tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean() topk10 = scores.topk(10, dim=0) topk5 = scores.topk(5, dim=0) topk1 = scores.topk(1, dim=0) topk10_iids = iids[topk10.indices] topk5_iids = iids[topk5.indices] topk1_iids = iids[topk1.indices] ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean() ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean() ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean() eval_result = { "tr_r10": tr_r10.item() * 100.0, "tr_r5": tr_r5.item() * 100.0, "tr_r1": tr_r1.item() * 100.0, "ir_r10": ir_r10.item() * 100.0, "ir_r5": 
ir_r5.item() * 100.0, "ir_r1": ir_r1.item() * 100.0, "average_score": 100.0 * (tr_r1 + tr_r5 + tr_r10 + ir_r1 + ir_r5 + ir_r10).item() / 6.0, } print('* Eval result = %s' % json.dumps(eval_result)) return eval_result, "average_score" class VQAHandler(TaskHandler): def __init__(self) -> None: super().__init__() self.predictions = [] self.criterion = nn.BCEWithLogitsLoss(reduction='mean') self.label2ans = None def train_batch(self, model, image, language_tokens, padding_mask, labels): logits = model( image=image, question=language_tokens, padding_mask=padding_mask) return { "loss": self.criterion(input=logits.float(), target=labels.float()) * labels.shape[1], } def before_eval(self, metric_logger, data_loader, **kwargs): self.predictions.clear() self.metric_logger = metric_logger self.label2ans = data_loader.dataset.label2ans def eval_batch(self, model, image, language_tokens, padding_mask, labels=None, qid=None): logits = model( image=image, question=language_tokens, padding_mask=padding_mask) batch_size = language_tokens.shape[0] if labels is not None: scores = utils.VQAScore()(logits, labels) * 100.0 self.metric_logger.meters['score'].update(scores.item(), n=batch_size) else: _, preds = logits.max(-1) for image_id, pred in zip(qid, preds): self.predictions.append({ "question_id": image_id.item(), "answer": self.label2ans[pred.item()], }) def after_eval(self, **kwargs): if len(self.predictions) == 0: print('* Score {score.global_avg:.3f}'.format(score=self.metric_logger.score)) return {k: meter.global_avg for k, meter in self.metric_logger.meters.items()}, "score" else: return self.predictions, "prediction" class CaptioningHandler(TaskHandler): def __init__(self, args) -> None: super().__init__() self.predictions = [] self.criterion = utils.BertCaptioningLoss(args.label_smoothing, args.drop_worst_ratio, args.drop_worst_after) self.tokenizer = get_sentencepiece_model_for_beit3(args) self.num_beams = args.num_beams self.max_len = args.num_max_bpe_tokens self.length_penalty = args.length_penalty self.vocab_size = args.vocab_size def train_batch(self, model, image, language_tokens, masked_tokens, language_masked_pos, padding_mask, image_id, global_step): logits, _ = model( image=image, text_ids=masked_tokens, padding_mask=padding_mask, language_masked_pos=language_masked_pos, image_id=image_id) masked_labels = language_tokens[language_masked_pos.bool()] score = torch.max(logits, -1)[1].data == masked_labels acc = torch.sum(score.float()) / torch.sum(language_masked_pos) return { "loss": self.criterion(logits, masked_labels, global_step), "acc": acc } def before_eval(self, metric_logger, data_loader, **kwargs): self.predictions.clear() self.metric_logger = metric_logger def eval_batch(self, model, image, image_id=None): cur_len = 2 num_keep_best = 1 TOPN_PER_BEAM = 3 batch_size = image.size(0) mask_id = self.tokenizer.mask_token_id cls_id = self.tokenizer.cls_token_id pad_id = self.tokenizer.pad_token_id sep_id = self.tokenizer.sep_token_id eos_token_ids = [sep_id] cls_ids = torch.full( (batch_size, 1), cls_id, dtype=torch.long, device=image.device ) mask_ids = torch.full( (batch_size, 1), mask_id, dtype=torch.long, device=image.device ) cur_input_ids = torch.cat([cls_ids, mask_ids], dim=1) tmp_ids = torch.full( (batch_size, self.max_len-1), mask_id, dtype=torch.long, device=image.device ) decoding_results = torch.cat([cls_ids, tmp_ids], dim=1) # Expand input to num beams cur_input_ids = cur_input_ids.unsqueeze(1).expand(batch_size, self.num_beams, cur_len) cur_input_ids = 
cur_input_ids.contiguous().view(batch_size * self.num_beams, cur_len) # (batch_size * num_beams, cur_len) decoding_results = decoding_results.unsqueeze(1).expand(batch_size, self.num_beams, self.max_len) decoding_results = decoding_results.contiguous().view(batch_size * self.num_beams, self.max_len) # (batch_size * num_beams, cur_len) image = image.unsqueeze(1).expand(batch_size, self.num_beams, image.size(-3), image.size(-2), image.size(-1)) image = image.contiguous().view(batch_size * self.num_beams, image.size(-3), image.size(-2), image.size(-1)) generated_hyps = [ utils.BeamHypotheses( num_keep_best, self.max_len, length_penalty=self.length_penalty, early_stopping=False ) for _ in range(batch_size) ] # scores for each sentence in the beam beam_scores = torch.zeros((batch_size, self.num_beams), dtype=torch.float, device=cur_input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,) # done sentences done = [False for _ in range(batch_size)] incremental_state = {} while cur_len <= self.max_len: next_token_idx = 1 padding_masks = torch.full( cur_input_ids.shape, 0, dtype=torch.long, device=image.device ) input_image = image if cur_len != 2: input_image = None outputs, incremental_state_next = model( image=input_image, text_ids=cur_input_ids, language_masked_pos=None, padding_mask=padding_masks, text_len=cur_len, incremental_state=incremental_state) incremental_state = incremental_state_next # assert outputs.shape[1] == token_len scores = outputs[:, next_token_idx, :] # (batch_size * num_beams, vocab_size) scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size) assert scores.size() == (batch_size * self.num_beams, self.vocab_size) # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product) _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together (we are keeping top hypothesis accross beams) _scores = _scores.view(batch_size, self.num_beams * self.vocab_size) # (batch_size, num_beams * vocab_size) next_scores, next_words = torch.topk(_scores, TOPN_PER_BEAM * self.num_beams, dim=1, largest=True, sorted=True) assert next_scores.size() == next_words.size() == (batch_size, TOPN_PER_BEAM * self.num_beams) # next batch beam content # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch) next_batch_beam = [] # for each sentence for batch_ex in range(batch_size): # if we are done with this sentence done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item()) if done[batch_ex]: next_batch_beam.extend([(0, pad_id, 0)] * self.num_beams) # pad the batch continue # next sentence beam content next_sent_beam = [] for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]): # get beam and word IDs beam_id = idx // self.vocab_size word_id = idx % self.vocab_size # end of sentence, or next word # if word_id.item() in eos_token_ids or cur_len + 1 == max_len: if (word_id.item() in eos_token_ids and cur_len + 1 <= self.max_len) or (cur_len + 1 == self.max_len): generated_hyps[batch_ex].add( decoding_results[batch_ex * self.num_beams + beam_id, :cur_len].clone(), score.item() ) else: next_sent_beam.append((score, word_id, batch_ex * self.num_beams + beam_id)) # the beam for next step is full if len(next_sent_beam) == self.num_beams: break # update next beam content if cur_len + 1 == self.max_len: 
assert len(next_sent_beam) == 0 else: assert len(next_sent_beam) == self.num_beams if len(next_sent_beam) == 0: next_sent_beam = [(0, pad_id, 0)] * self.num_beams # pad the batch next_batch_beam.extend(next_sent_beam) assert len(next_batch_beam) == self.num_beams * (batch_ex + 1) # sanity check / prepare next batch assert len(next_batch_beam) == batch_size * self.num_beams beam_scores = beam_scores.new([x[0] for x in next_batch_beam]) beam_words = cur_input_ids.new([x[1] for x in next_batch_beam]) beam_idx = cur_input_ids.new([x[2] for x in next_batch_beam]) # re-order batch cur_input_ids = cur_input_ids[beam_idx, :] decoding_results = decoding_results[beam_idx, :] for module in incremental_state: for key in incremental_state[module]: result = incremental_state[module][key].index_select(0, beam_idx) incremental_state[module][key] = result[:,:,:-1,:] next_ids = torch.full( (batch_size * self.num_beams, 1), mask_id, dtype=torch.long, device=image.device ) cur_input_ids = torch.cat([beam_words.unsqueeze(1), next_ids], dim=1) decoding_results[:, cur_len-1] = beam_words # update current length cur_len = cur_len + 1 # stop when we are done with each sentence if all(done): break # select the best hypotheses tgt_len = torch.ones(batch_size, num_keep_best, dtype=torch.long) logprobs = torch.zeros(batch_size, num_keep_best, dtype=torch.float).fill_(-1e5).to(cur_input_ids.device) all_best = [] for i, hypotheses in enumerate(generated_hyps): best = [] hyp_scores = torch.tensor([x[0] for x in hypotheses.hyp]) _, best_indices = torch.topk(hyp_scores, min(num_keep_best, len(hyp_scores)), largest=True) for best_idx, hyp_idx in enumerate(best_indices): conf, best_hyp = hypotheses.hyp[hyp_idx] best.append(best_hyp) logprobs[i, best_idx] = conf tgt_len[i, best_idx] = len(best_hyp) + 1 # +1 for the <EOS> symbol all_best.append(best) # generate target batch, pad to the same length decoded = cur_input_ids.new(batch_size, num_keep_best, self.max_len).fill_(pad_id) for batch_idx, best in enumerate(all_best): for best_idx, hypo in enumerate(best): decoded[batch_idx, best_idx, : tgt_len[batch_idx, best_idx] - 1] = hypo decoded[batch_idx, best_idx, tgt_len[batch_idx, best_idx] - 1] = eos_token_ids[0] captions = self.tokenizer.batch_decode(decoded.squeeze(1), skip_special_tokens=True) for qid, pred in zip(image_id, captions): self.predictions.append({ "image_id": qid.item(), "caption": pred, }) def after_eval(self, **kwargs): return self.predictions, "prediction" def get_handler(args): if args.task == "nlvr2": return NLVR2Handler() elif args.task == "vqav2": return VQAHandler() elif args.task in ("flickr30k", "coco_retrieval"): return RetrievalHandler() elif args.task in ("coco_captioning", "nocaps"): return CaptioningHandler(args) elif args.task in ("imagenet"): return ImageNetHandler(args) else: raise NotImplementedError("Sorry, %s is not support." 
% args.task) def train_one_epoch( model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, handler: TaskHandler, epoch: int, start_steps: int, lr_schedule_values: list, loss_scaler, max_norm: float = 0, update_freq: int = 1, model_ema: Optional[ModelEma] = None, log_writer: Optional[utils.TensorboardLogger] = None, task = None, mixup_fn=None, ): model.train(True) metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) header = 'Epoch: [{}]'.format(epoch) print_freq = 10 if loss_scaler is None: model.zero_grad() model.micro_steps = 0 else: optimizer.zero_grad() for data_iter_step, data in enumerate(metric_logger.log_every(data_loader, print_freq, header)): step = data_iter_step // update_freq global_step = start_steps + step # global training iteration # Update LR & WD for the first acc if lr_schedule_values is not None and data_iter_step % update_freq == 0: for i, param_group in enumerate(optimizer.param_groups): if lr_schedule_values is not None: param_group["lr"] = lr_schedule_values[global_step] * param_group["lr_scale"] # put input data into cuda for tensor_key in data.keys(): data[tensor_key] = data[tensor_key].to(device, non_blocking=True) # print("input %s = %s" % (tensor_key, data[tensor_key])) if loss_scaler is None and tensor_key.startswith("image"): data[tensor_key] = data[tensor_key].half() # mixup for imagenet finetuning if mixup_fn is not None: data["image"], data["label"] = mixup_fn(data["image"], data["label"]) if task in ["coco_captioning", "nocaps"]: data["global_step"] = global_step if loss_scaler is None: results = handler.train_batch(model, **data) else: with torch.cuda.amp.autocast(): results = handler.train_batch(model, **data) loss = results.pop("loss") loss_value = loss.item() if not math.isfinite(loss_value): print("Loss is {}, stopping training".format(loss_value)) sys.exit(1) if loss_scaler is None: loss /= update_freq model.backward(loss) model.step() if (data_iter_step + 1) % update_freq == 0: # model.zero_grad() # Deepspeed will call step() & model.zero_grad() automatic if model_ema is not None: model_ema.update(model) grad_norm = None loss_scale_value = utils.get_loss_scale_for_deepspeed(model) else: # this attribute is added by timm on one optimizer (adahessian) is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order loss /= update_freq grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order, update_grad=(data_iter_step + 1) % update_freq == 0) if (data_iter_step + 1) % update_freq == 0: optimizer.zero_grad() if model_ema is not None: model_ema.update(model) loss_scale_value = loss_scaler.state_dict()["scale"] torch.cuda.synchronize() metric_logger.update(loss=loss_value) metric_logger.update(loss_scale=loss_scale_value) min_lr = 10. max_lr = 0. 
for group in optimizer.param_groups: min_lr = min(min_lr, group["lr"]) max_lr = max(max_lr, group["lr"]) metric_logger.update(lr=max_lr) metric_logger.update(min_lr=min_lr) weight_decay_value = None for group in optimizer.param_groups: if group["weight_decay"] > 0: weight_decay_value = group["weight_decay"] metric_logger.update(weight_decay=weight_decay_value) metric_logger.update(grad_norm=grad_norm) if log_writer is not None: kwargs = { "loss": loss_value, } for key in results: kwargs[key] = results[key] log_writer.update(head="train", **kwargs) kwargs = { "loss_scale": loss_scale_value, "lr": max_lr, "min_lr": min_lr, "weight_decay": weight_decay_value, "grad_norm": grad_norm, } log_writer.update(head="opt", **kwargs) log_writer.set_step() # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} @torch.no_grad() def evaluate(data_loader, model, device, handler): metric_logger = utils.MetricLogger(delimiter=" ") header = 'Test:' # switch to evaluation mode model.eval() handler.before_eval(metric_logger=metric_logger, data_loader=data_loader) for data in metric_logger.log_every(data_loader, 10, header): for tensor_key in data.keys(): data[tensor_key] = data[tensor_key].to(device, non_blocking=True) with torch.cuda.amp.autocast(): handler.eval_batch(model=model, **data) # gather the stats from all processes metric_logger.synchronize_between_processes() return handler.after_eval()
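# ---------------------------------------------------------------------------
# Minimal wiring sketch (added commentary, not part of the upstream file): a
# fine-tuning driver typically pairs a task handler with the two entry points
# above. The surrounding objects (args, model, loaders, optimizer, schedules)
# are assumed to be built elsewhere, e.g. in the accompanying run script.
#
#   handler = get_handler(args)
#   for epoch in range(args.start_epoch, args.epochs):
#       train_one_epoch(
#           model, train_loader, optimizer, device, handler, epoch,
#           start_steps=epoch * steps_per_epoch,
#           lr_schedule_values=lr_schedule_values, loss_scaler=loss_scaler,
#           max_norm=args.clip_grad, update_freq=args.update_freq,
#           model_ema=model_ema, log_writer=log_writer,
#           task=args.task, mixup_fn=mixup_fn)
#       eval_result, key = evaluate(val_loader, model, device, handler)
#
# evaluate() returns (metrics_dict, metric_key) for the classification and
# retrieval handlers, and (prediction_list, "prediction") for VQA test-time
# prediction and captioning.
# ---------------------------------------------------------------------------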
EXA-1-master
exa/models/unilm-master/beit3/engine_for_finetuning.py
# -------------------------------------------------------- # Image as a Foreign Language: BEiT Pretraining for Vision and Vision-Language Tasks (https://arxiv.org/abs/2208.10442) # Github source: https://github.com/microsoft/unilm/tree/master/beit3 # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------' import os import json import random import torch import glob from collections import defaultdict, Counter from torchvision import transforms from torchvision.datasets.folder import default_loader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.data.transforms import RandomResizedCropAndInterpolation from timm.data import create_transform import utils from glossary import normalize_word from randaug import RandomAugment class BaseDataset(torch.utils.data.Dataset): def __init__( self, data_path, split, transform, tokenizer, num_max_bpe_tokens, task=None, ): index_files = self.get_index_files(split, task=task) self.tokenizer = tokenizer self.num_max_bpe_tokens = num_max_bpe_tokens self.data_path = data_path items = [] self.index_files = index_files offset = 0 for _index_file in index_files: index_file = os.path.join(data_path, _index_file) with open(index_file, mode="r", encoding="utf-8") as reader: for line in reader: data = json.loads(line) items.append(data) print("Load %d image-text pairs from %s. " % (len(items) - offset, index_file)) offset = len(items) self.items = items self.bos_token_id = tokenizer.bos_token_id self.eos_token_id = tokenizer.eos_token_id self.pad_token_id = tokenizer.pad_token_id self.loader = default_loader self.transform = transform self.split = split @staticmethod def get_index_files(split): raise NotImplementedError() def _get_image(self, image_path: str): image_path = os.path.join(self.data_path, image_path) image = self.loader(image_path) return self.transform(image) def _get_text_segment(self, text_segment, max_len=None): if isinstance(text_segment, str): tokens = self.tokenizer.tokenize(text_segment) else: tokens = text_segment[:] if len(tokens) == 0: raise RuntimeError("The text segment should contains at least one tokens!") if max_len is None: max_len = self.num_max_bpe_tokens if len(tokens) > max_len - 2: tokens = tokens[:max_len - 2] tokens = [self.bos_token_id] + tokens[:] + [self.eos_token_id] num_tokens = len(tokens) padding_mask = [0] * num_tokens + [1] * (max_len - num_tokens) return tokens + [self.pad_token_id] * (max_len - num_tokens), padding_mask, num_tokens def _get_image_text_example(self, index: int, data: dict): item = self.items[index] img_path = item["image_path"] img = self._get_image(img_path) data["image"] = img text_segment = item["text_segment"] language_tokens, padding_mask, _ = self._get_text_segment(text_segment) data["language_tokens"] = language_tokens data["padding_mask"] = padding_mask def __getitem__(self, index: int): data = dict() self._get_image_text_example(index, data) return data def __len__(self) -> int: return len(self.items) def __repr__(self) -> str: head = "Dataset " + self.__class__.__name__ body = '{' + "\n Number of items: %s," % self.__len__() body += "\n data root = %s," % self.data_path body += "\n split = %s," % self.split body += "\n dataset index files = %s" % str(self.index_files) body += "\n num max bpe tokens = %s" % self.num_max_bpe_tokens body += "\n transforms = [" for t in self.transform.transforms: body += "\n %s" % str(t) 
body += "\n ]" body += "\n}" return head + body def _write_data_into_jsonl(items, jsonl_file): with open(jsonl_file, mode="w", encoding="utf-8") as writer: for data in items: writer.write(json.dumps(data, indent=None)) writer.write('\n') print("Write %s with %d items !" % (jsonl_file, len(items))) def _make_retrieval_coco_karpathy_dataset_index( data_path, tokenizer, split=("train", "restval"), split_name="train", ): coco_karpathy_split_json_file = os.path.join(data_path, "dataset_coco.json") items = [] image_counter = set() print("read %s" % coco_karpathy_split_json_file) with open(coco_karpathy_split_json_file, mode="r", encoding="utf-8") as reader: data = json.loads(reader.read()) for item in data["images"]: if item["split"] in split: image_path = os.path.join(item["filepath"], item["filename"]) for sent in item["sentences"]: tokens = tokenizer.tokenize(sent["raw"]) token_ids = tokenizer.convert_tokens_to_ids(tokens) items.append({ "image_path": image_path, "text_segment": token_ids, "image_id": len(image_counter), }) if image_path not in image_counter: image_counter.add(image_path) print("Find %d images and %d image-text pairs for karpathy dataset %s split !" % \ (len(image_counter), len(items), split_name)) index_file = os.path.join(data_path, "coco_retrieval.%s.jsonl" % split_name) _write_data_into_jsonl(items, index_file) pass def _make_captioning_coco_karpathy_dataset_index( data_path, tokenizer, split=("train", "restval"), split_name="train", ): coco_karpathy_split_json_file = os.path.join(data_path, "dataset_coco.json") items = [] image_counter = set() print("read %s" % coco_karpathy_split_json_file) with open(coco_karpathy_split_json_file, mode="r", encoding="utf-8") as reader: data = json.loads(reader.read()) for item in data["images"]: if item["split"] in split: image_path = os.path.join(item["filepath"], item["filename"]) if item["split"] in ["train", "restval"]: for sent in item["sentences"]: tokens = tokenizer.tokenize(sent["raw"]) token_ids = tokenizer.convert_tokens_to_ids(tokens) items.append({ "image_path": image_path, "text_segment": token_ids, "image_id": item["cocoid"], }) else: items.append({ "image_path": image_path, "text_segment": None, "image_id": item["cocoid"], }) if image_path not in image_counter: image_counter.add(image_path) print("Find %d images and %d image-text pairs for karpathy dataset %s split !" % \ (len(image_counter), len(items), split_name)) index_file = os.path.join(data_path, "coco_captioning.%s.jsonl" % split_name) _write_data_into_jsonl(items, index_file) pass def _make_nocaps_dataset_index( data_path, split="val", ): if split == "val": json_file = "nocaps_val_4500_captions.json" elif split == "test": json_file = "nocaps_test_image_info.json" nocaps_split_json_file = os.path.join(data_path, json_file) items = [] image_counter = set() print("read %s" % nocaps_split_json_file) with open(nocaps_split_json_file, mode="r", encoding="utf-8") as reader: data = json.loads(reader.read()) for item in data["images"]: image_path = os.path.join(split, item["file_name"]) items.append({ "image_path": image_path, "text_segment": None, "image_id": item["id"], }) if image_path not in image_counter: image_counter.add(image_path) print("Find %d images and %d image-text pairs for nocaps dataset %s split !" 
% \ (len(image_counter), len(items), split)) index_file = os.path.join(data_path, "nocaps.%s.jsonl" % split) _write_data_into_jsonl(items, index_file) class NLVR2Dataset(BaseDataset): @staticmethod def get_index_files(split, task=None): if split == "train": return ("nlvr2.train.index.jsonl", ) elif split == "val": return ("nlvr2.dev.index.jsonl", ) elif split == "test": return ("nlvr2.test-P.index.jsonl", ) else: raise RuntimeError("split %s is not found!" % split) def __getitem__(self, index: int): data = super().__getitem__(index) item = self.items[index] img_path = item["image2_path"] img = self._get_image(img_path) data["image2"] = img data["label"] = self.items[index]["label"] return data @staticmethod def __preprocess_json(preifx, json_file, tokenizer, index_file): items = [] with open(json_file, mode="r", encoding="utf-8") as reader: for line in reader: data = json.loads(line) path = os.path.join(preifx, str(data["directory"])) if "directory" in data else preifx path = os.path.join(path, "-".join(data["identifier"].split("-")[:-1])) tokens = tokenizer.tokenize(data["sentence"]) token_ids = tokenizer.convert_tokens_to_ids(tokens) items.append({ "image_path": path + "-img0.png", "image2_path": path + "-img1.png", "text_segment": token_ids, "label": 1 if data["label"] == "True" else 0, "identifier": data["identifier"], }) _write_data_into_jsonl(items, index_file) @classmethod def make_dataset_index(cls, data_path, tokenizer, nlvr_repo_path): cls.__preprocess_json( preifx="images/train", json_file=os.path.join(nlvr_repo_path, "nlvr2/data/train.json"), tokenizer=tokenizer, index_file=os.path.join(data_path, cls.get_index_files("train")[0]), ) cls.__preprocess_json( preifx="dev", json_file=os.path.join(nlvr_repo_path, "nlvr2/data/dev.json"), tokenizer=tokenizer, index_file=os.path.join(data_path, cls.get_index_files("val")[0]), ) cls.__preprocess_json( preifx="test1", json_file=os.path.join(nlvr_repo_path, "nlvr2/data/test1.json"), tokenizer=tokenizer, index_file=os.path.join(data_path, cls.get_index_files("test")[0]), ) class ImageNetDataset(BaseDataset): @staticmethod def get_index_files(split, task=None): if split == "train": return ("imagenet.train.index.jsonl", ) elif split == "val": return ("imagenet.val.index.jsonl", ) elif split == "test": return ("imagenet.val.index.jsonl", ) else: raise RuntimeError("split %s is not found!" % split) def __getitem__(self, index: int): data = dict() item = self.items[index] img_path = item["image_path"] img = self._get_image(img_path) data["image"] = img data["label"] = item["label"] return data @staticmethod def _find_classes(dir): """ Finds the class folders in a dataset. Args: dir (string): Root directory path. Returns: tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary. Ensures: No class is a subdirectory of another. 
""" classes = [d.name for d in os.scandir(dir) if d.is_dir()] classes.sort() class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)} return classes, class_to_idx @staticmethod def _make_imagenet_index(data_path, index_path, data_path_prefix, class_to_idx, split): items = [] index_file = os.path.join(index_path, f"imagenet.{split}.index.jsonl") for target_class in sorted(class_to_idx.keys()): class_index = class_to_idx[target_class] target_dir = os.path.join(data_path, target_class) if not os.path.isdir(target_dir): continue for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)): for fname in sorted(fnames): path = os.path.join(root, fname) path = path.replace(data_path_prefix, "") items.append({ "image_path": path, "label": class_index, }) _write_data_into_jsonl(items, index_file) @classmethod def make_dataset_index(cls, train_data_path, val_data_path, index_path): data_path_prefix = train_data_path[:[x[0]==x[1] for x in zip(train_data_path, val_data_path)].index(0)] classes, class_to_idx = cls._find_classes(train_data_path) cls._make_imagenet_index( data_path=train_data_path, index_path=index_path, data_path_prefix=data_path_prefix, class_to_idx=class_to_idx, split="train", ) cls._make_imagenet_index( data_path=val_data_path, index_path=index_path, data_path_prefix=data_path_prefix, class_to_idx=class_to_idx, split="val", ) class VQAv2Dataset(BaseDataset): def __init__(self, data_path, **kwargs): super().__init__(data_path=data_path, **kwargs) ans2label_file = os.path.join(data_path, "answer2label.txt") ans2label = {} label2ans = [] with open(ans2label_file, mode="r", encoding="utf-8") as reader: for i, line in enumerate(reader): data = json.loads(line) ans = data["answer"] label = data["label"] label = int(label) assert label == i ans2label[ans] = i label2ans.append(ans) self.ans2label = ans2label self.label2ans = label2ans @staticmethod def get_index_files(split, task=None): if split == "train": return ("vqa.train.jsonl", "vqa.trainable_val.jsonl") elif split == "val": return ("vqa.rest_val.jsonl", ) elif split == "test": return ("vqa.test.jsonl", ) elif split == "test-dev": return ("vqa.test-dev.jsonl", ) else: raise RuntimeError("split %s is not found!" % split) def __getitem__(self, index: int): data = super().__getitem__(index) if "labels" in self.items[index] and len(self.items[index]["labels"]) > 0: labels = [0.] 
* len(self.label2ans) for l, s in zip(self.items[index]["labels"], self.items[index]["scores"]): labels[l] = s data["labels"] = torch.FloatTensor(labels) else: data["qid"] = self.items[index]["qid"] return data @staticmethod def get_score(occurences): if occurences == 0: return 0.0 elif occurences == 1: return 0.3 elif occurences == 2: return 0.6 elif occurences == 3: return 0.9 else: return 1.0 @classmethod def make_dataset_index(cls, data_path, tokenizer, annotation_data_path): with open(os.path.join(annotation_data_path, "v2_OpenEnded_mscoco_train2014_questions.json"), "r") as fp: questions_train2014 = json.load(fp)["questions"] with open(os.path.join(annotation_data_path, "v2_OpenEnded_mscoco_val2014_questions.json"), "r") as fp: questions_val2014 = json.load(fp)["questions"] with open(os.path.join(annotation_data_path, "v2_OpenEnded_mscoco_test2015_questions.json"), "r") as fp: questions_test2015 = json.load(fp)["questions"] with open(os.path.join(annotation_data_path, "v2_OpenEnded_mscoco_test-dev2015_questions.json"), "r") as fp: questions_test_dev2015 = json.load(fp)["questions"] with open(os.path.join(annotation_data_path, "v2_mscoco_train2014_annotations.json"), "r") as fp: annotations_train2014 = json.load(fp)["annotations"] with open(os.path.join(annotation_data_path, "v2_mscoco_val2014_annotations.json"), "r") as fp: annotations_val2014 = json.load(fp)["annotations"] annotations = dict() for split, questions in zip( ["train", "val", "test", "test-dev"], [questions_train2014, questions_val2014, questions_test2015, questions_test_dev2015], ): _annot = defaultdict(dict) for q in questions: question_text = q["question"] tokens = tokenizer.tokenize(question_text) token_ids = tokenizer.convert_tokens_to_ids(tokens) assert q["question_id"] not in _annot[q["image_id"]] _annot[q["image_id"]][q["question_id"]] = { "question": question_text, "token_ids": token_ids, } annotations[split] = _annot all_major_answers = list() for split, annots in zip( ["train", "val"], [annotations_train2014, annotations_val2014], ): # _annot = annotations[split] for q in annots: all_major_answers.append(q["multiple_choice_answer"]) all_major_answers = [normalize_word(word) for word in all_major_answers] counter = {k: v for k, v in Counter(all_major_answers).items() if v >= 9} ans2label = {k: i for i, k in enumerate(counter.keys())} label2ans = list(counter.keys()) for split, annots in zip( ["train", "val"], [annotations_train2014, annotations_val2014], ): _annot = annotations[split] for q in annots: answers = q["answers"] answer_count = {} for answer in answers: answer_ = answer["answer"] answer_count[answer_] = answer_count.get(answer_, 0) + 1 labels = [] scores = [] for answer in answer_count: if answer not in ans2label: continue labels.append(ans2label[answer]) score = cls.get_score(answer_count[answer]) scores.append(score) assert "labels" not in _annot[q["image_id"]][q["question_id"]] assert "question" in _annot[q["image_id"]][q["question_id"]] _annot[q["image_id"]][q["question_id"]]["labels"] = labels _annot[q["image_id"]][q["question_id"]]["scores"] = scores for split in ["train", "val"]: filtered_annot = dict() for ik, iv in annotations[split].items(): new_q = dict() for qk, qv in iv.items(): if len(qv["labels"]) != 0: new_q[qk] = qv if len(new_q) != 0: filtered_annot[ik] = new_q annotations[split] = filtered_annot split2items = {} for split in ["train", "val", "test", "test-dev"]: annot = annotations[split] split_name = { "train": "train2014", "val": "val2014", "test": "test2015", "test-dev": 
"test2015", }[split] paths = list(glob.glob(f"{data_path}/{split_name}/*.jpg")) random.shuffle(paths) annot_paths = [path for path in paths \ if int(path.split("/")[-1].split("_")[-1][:-4]) in annot] if len(paths) == len(annot_paths): print("all images have caption annotations") else: print("not all images have caption annotations") print(len(paths), len(annot_paths), len(annot)) items = [] for path in annot_paths: iid = int(path.split("/")[-1].split("_")[-1][:-4]) _annot = annotations[split][iid] for qid in _annot: q = _annot[qid] if split in ["train", "val"]: labels = q["labels"] scores = q["scores"] else: labels, scores = [], [] items.append({ "image_path": os.path.join(split_name, path.split('/')[-1]), "text_segment": q["token_ids"], "labels": labels, "scores": scores, "qid": qid, }) split2items[split] = items _write_data_into_jsonl(items=items, jsonl_file=os.path.join(data_path, "vqa.%s.jsonl" % split)) # Following ViLT, we use 1000 images of the original val set as the final val set val_image2items = defaultdict(list) for item in split2items["val"]: val_image2items[item["image_path"]].append(item) print("Contains %d image and %d pairs for val set!" % (len(val_image2items), len(split2items["val"]))) val_images = list(val_image2items.keys()) random.shuffle(val_images) trainable_val = [] rest_val = [] for i, image_id in enumerate(val_images): if i < 1000: rest_val += val_image2items[image_id] else: trainable_val += val_image2items[image_id] _write_data_into_jsonl(items=trainable_val, jsonl_file=os.path.join(data_path, "vqa.trainable_val.jsonl")) _write_data_into_jsonl(items=rest_val, jsonl_file=os.path.join(data_path, "vqa.rest_val.jsonl")) with open(os.path.join(data_path, "answer2label.txt"), mode="w", encoding="utf-8") as writer: for ans in ans2label: to_json = { "answer": ans, "label": ans2label[ans] } writer.write("%s\n" % json.dumps(to_json)) class RetrievalDataset(BaseDataset): @staticmethod def get_index_files(split, task=None): if split == "train": return (f"{task}.train.jsonl", ) elif split == "val": return (f"{task}.val.jsonl", ) elif split == "test": return (f"{task}.test.jsonl", ) else: raise RuntimeError("split %s is not found!" % split) def __getitem__(self, index: int): data = super().__getitem__(index) data["image_id"] = self.items[index]["image_id"] return data @staticmethod def make_flickr30k_dataset_index(data_path, tokenizer, karpathy_path): with open(os.path.join(karpathy_path, "dataset_flickr30k.json"), "r") as reader: captions = json.loads(reader.read()) captions = captions["images"] split2items = defaultdict(list) split2images = defaultdict(set) for each_item in captions: image_path = os.path.join("flickr30k-images", each_item["filename"]) split = each_item["split"] for text_segment in each_item["sentences"]: tokens = tokenizer.tokenize(text_segment["raw"]) token_ids = tokenizer.convert_tokens_to_ids(tokens) split2items[split].append({ "image_path": image_path, "text_segment": token_ids, "image_id": len(split2images[split]), }) assert each_item["filename"] not in split2images[split] split2images[split].add(each_item["filename"]) for split in split2items: print("%d images and %d image-text pairs!" 
% (len(split2images[split]), len(split2items[split]))) _write_data_into_jsonl(split2items[split], os.path.join(data_path, "flickr30k.%s.jsonl" % split)) @staticmethod def make_coco_dataset_index(data_path, tokenizer): _make_retrieval_coco_karpathy_dataset_index(data_path, tokenizer, split=("train", "restval"), split_name="train") _make_retrieval_coco_karpathy_dataset_index(data_path, tokenizer, split=("val", ), split_name="val") _make_retrieval_coco_karpathy_dataset_index(data_path, tokenizer, split=("test", ), split_name="test") class CaptioningDataset(BaseDataset): def __init__(self, data_path, split, transform, tokenizer, num_max_bpe_tokens, task, mask_prob): super().__init__( data_path=data_path, split=split, transform=transform, tokenizer=tokenizer, num_max_bpe_tokens=num_max_bpe_tokens, task=task, ) self.mask_token_id = tokenizer.mask_token_id self.language_vocab_size = tokenizer.vocab_size self.mask_prob = mask_prob @staticmethod def get_index_files(split, task=None): if split == "train": return ("coco_captioning.train.jsonl", ) elif split == "val": return (f"{task}.val.jsonl", ) elif split == "test": return (f"{task}.test.jsonl", ) else: raise RuntimeError("split %s is not found!" % split) def _get_mask_token(self, token): p = random.random() if p < 0.8: return self.mask_token_id elif p < 0.9: return token else: return random.randint(3, self.language_vocab_size - 1) def _masking_on_text_tokens(self, tokens, num_tokens, mask_prob): bool_masked_pos = [0] * len(tokens) to_mask = min(int(num_tokens * mask_prob + 0.5), num_tokens - 1) to_mask = max(to_mask, 1) num_masked_tokens = 0 while num_masked_tokens < to_mask: i = random.randint(1, num_tokens - 1) if bool_masked_pos[i] == 0: bool_masked_pos[i] = 1 tokens[i] = self._get_mask_token(tokens[i]) num_masked_tokens += 1 return tokens, bool_masked_pos def __getitem__(self, index: int): data = dict() item = self.items[index] img_path = item["image_path"] img = self._get_image(img_path) data["image"] = img data["image_id"] = item["image_id"] text_segment = item["text_segment"] if text_segment is not None: language_tokens, padding_mask, num_tokens = self._get_text_segment(text_segment) masked_tokens = language_tokens[:] masked_tokens, language_masked_pos = \ self._masking_on_text_tokens(masked_tokens, num_tokens, self.mask_prob) data["language_tokens"] = language_tokens data["masked_tokens"] = masked_tokens data["language_masked_pos"] = language_masked_pos data["padding_mask"] = padding_mask return data @staticmethod def make_coco_captioning_dataset_index(data_path, tokenizer): _make_captioning_coco_karpathy_dataset_index(data_path, tokenizer, split=("train", "restval"), split_name="train") _make_captioning_coco_karpathy_dataset_index(data_path, tokenizer, split=("val", ), split_name="val") _make_captioning_coco_karpathy_dataset_index(data_path, tokenizer, split=("test", ), split_name="test") @staticmethod def make_nocaps_captioning_dataset_index(data_path): _make_nocaps_dataset_index(data_path, split="val") _make_nocaps_dataset_index(data_path, split="test") task2dataset = { "nlvr2": NLVR2Dataset, "vqav2": VQAv2Dataset, "flickr30k": RetrievalDataset, "coco_retrieval": RetrievalDataset, "coco_captioning": CaptioningDataset, "nocaps": CaptioningDataset, "imagenet": ImageNetDataset, } def create_dataloader(dataset, is_train, batch_size, num_workers, pin_mem, dist_eval=False): if is_train or dist_eval: num_tasks = utils.get_world_size() global_rank = utils.get_rank() if not is_train and dist_eval and len(dataset) % num_tasks != 0: 
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. ' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler = torch.utils.data.DistributedSampler( dataset, num_replicas=num_tasks, rank=global_rank, shuffle=is_train ) else: sampler = torch.utils.data.SequentialSampler(dataset) return torch.utils.data.DataLoader( dataset, sampler=sampler, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_mem, drop_last=is_train, collate_fn=utils.merge_batch_tensors_by_dict_key, ) def build_transform(is_train, args): if args.task in ["imagenet"]: return build_imagenet_transform(is_train, args) if is_train: t = [ RandomResizedCropAndInterpolation(args.input_size, scale=(0.5, 1.0), interpolation=args.train_interpolation), transforms.RandomHorizontalFlip(), ] if args.randaug: t.append( RandomAugment( 2, 7, isPIL=True, augs=[ 'Identity','AutoContrast','Equalize','Brightness','Sharpness', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate', ])) t += [ transforms.ToTensor(), transforms.Normalize(mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), ] t = transforms.Compose(t) else: t = transforms.Compose([ transforms.Resize((args.input_size, args.input_size), interpolation=3), transforms.ToTensor(), transforms.Normalize(mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD) ]) return t def build_imagenet_transform(is_train, args): resize_im = args.input_size > 32 if is_train: # this should always dispatch to transforms_imagenet_train transform = create_transform( input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, ) if not resize_im: # replace RandomResizedCropAndInterpolation with # RandomCrop transform.transforms[0] = transforms.RandomCrop( args.input_size, padding=4) return transform t = [] if resize_im: if args.crop_pct is None: args.crop_pct = 1.0 size = int(args.input_size / args.crop_pct) t.append( transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 
224 images ) t.append(transforms.CenterCrop(args.input_size)) t.append(transforms.ToTensor()) t.append(transforms.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)) return transforms.Compose(t) def get_sentencepiece_model_for_beit3(args): from transformers import XLMRobertaTokenizer return XLMRobertaTokenizer(args.sentencepiece_model) def create_dataset_by_split(args, split, is_train=True): transform = build_transform(is_train=is_train, args=args) dataset_class = task2dataset[args.task] tokenizer = get_sentencepiece_model_for_beit3(args) opt_kwargs = {} if args.task in ["coco_captioning", "nocaps"]: opt_kwargs["mask_prob"] = args.captioning_mask_prob dataset = dataset_class( data_path=args.data_path, split=split, transform=transform, tokenizer=tokenizer, num_max_bpe_tokens=args.num_max_bpe_tokens, task=args.task, **opt_kwargs, ) if is_train: batch_size = args.batch_size elif hasattr(args, "eval_batch_size") and args.eval_batch_size is not None: batch_size = args.eval_batch_size else: batch_size = int(args.batch_size * 1.5) return create_dataloader( dataset, is_train=is_train, batch_size=batch_size, num_workers=args.num_workers, pin_mem=args.pin_mem, dist_eval=args.dist_eval, ) def create_downstream_dataset(args, is_eval=False): if is_eval: return create_dataset_by_split(args, split="test", is_train=False) else: return \ create_dataset_by_split(args, split="train", is_train=True), \ create_dataset_by_split(args, split="val", is_train=True)
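# ---------------------------------------------------------------------------
# Minimal usage sketch (added commentary; the argument set below only mirrors
# the fields this module reads and is an assumption of the example, not the
# full fine-tuning configuration). Index files such as coco_retrieval.*.jsonl
# are expected to exist under data_path, e.g. created beforehand with
# RetrievalDataset.make_coco_dataset_index(data_path, tokenizer).
#
#   from argparse import Namespace
#   args = Namespace(
#       task="coco_retrieval", data_path="/path/to/coco", input_size=224,
#       train_interpolation="bicubic", randaug=True, num_max_bpe_tokens=64,
#       sentencepiece_model="/path/to/beit3.spm", batch_size=64,
#       eval_batch_size=None, num_workers=4, pin_mem=True, dist_eval=False)
#   train_loader, val_loader = create_downstream_dataset(args)
#   test_loader = create_downstream_dataset(args, is_eval=True)
# ---------------------------------------------------------------------------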
EXA-1-master
exa/models/unilm-master/beit3/datasets.py
import cv2 import numpy as np ## aug functions def identity_func(img): return img def autocontrast_func(img, cutoff=0): ''' same output as PIL.ImageOps.autocontrast ''' n_bins = 256 def tune_channel(ch): n = ch.size cut = cutoff * n // 100 if cut == 0: high, low = ch.max(), ch.min() else: hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) low = np.argwhere(np.cumsum(hist) > cut) low = 0 if low.shape[0] == 0 else low[0] high = np.argwhere(np.cumsum(hist[::-1]) > cut) high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0] if high <= low: table = np.arange(n_bins) else: scale = (n_bins - 1) / (high - low) offset = -low * scale table = np.arange(n_bins) * scale + offset table[table < 0] = 0 table[table > n_bins - 1] = n_bins - 1 table = table.clip(0, 255).astype(np.uint8) return table[ch] channels = [tune_channel(ch) for ch in cv2.split(img)] out = cv2.merge(channels) return out def equalize_func(img): ''' same output as PIL.ImageOps.equalize PIL's implementation is different from cv2.equalize ''' n_bins = 256 def tune_channel(ch): hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) non_zero_hist = hist[hist != 0].reshape(-1) step = np.sum(non_zero_hist[:-1]) // (n_bins - 1) if step == 0: return ch n = np.empty_like(hist) n[0] = step // 2 n[1:] = hist[:-1] table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8) return table[ch] channels = [tune_channel(ch) for ch in cv2.split(img)] out = cv2.merge(channels) return out def rotate_func(img, degree, fill=(0, 0, 0)): ''' like PIL, rotate by degree, not radians ''' H, W = img.shape[0], img.shape[1] center = W / 2, H / 2 M = cv2.getRotationMatrix2D(center, degree, 1) out = cv2.warpAffine(img, M, (W, H), borderValue=fill) return out def solarize_func(img, thresh=128): ''' same output as PIL.ImageOps.posterize ''' table = np.array([el if el < thresh else 255 - el for el in range(256)]) table = table.clip(0, 255).astype(np.uint8) out = table[img] return out def color_func(img, factor): ''' same output as PIL.ImageEnhance.Color ''' ## implementation according to PIL definition, quite slow # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis] # out = blend(degenerate, img, factor) # M = ( # np.eye(3) * factor # + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. 
- factor) # )[np.newaxis, np.newaxis, :] M = ( np.float32([ [0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]) * factor + np.float32([[0.114], [0.587], [0.299]]) ) out = np.matmul(img, M).clip(0, 255).astype(np.uint8) return out def contrast_func(img, factor): """ same output as PIL.ImageEnhance.Contrast """ mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299])) table = np.array([( el - mean) * factor + mean for el in range(256) ]).clip(0, 255).astype(np.uint8) out = table[img] return out def brightness_func(img, factor): ''' same output as PIL.ImageEnhance.Contrast ''' table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8) out = table[img] return out def sharpness_func(img, factor): ''' The differences the this result and PIL are all on the 4 boundaries, the center areas are same ''' kernel = np.ones((3, 3), dtype=np.float32) kernel[1][1] = 5 kernel /= 13 degenerate = cv2.filter2D(img, -1, kernel) if factor == 0.0: out = degenerate elif factor == 1.0: out = img else: out = img.astype(np.float32) degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :] out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate) out = out.astype(np.uint8) return out def shear_x_func(img, factor, fill=(0, 0, 0)): H, W = img.shape[0], img.shape[1] M = np.float32([[1, factor, 0], [0, 1, 0]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def translate_x_func(img, offset, fill=(0, 0, 0)): ''' same output as PIL.Image.transform ''' H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, -offset], [0, 1, 0]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def translate_y_func(img, offset, fill=(0, 0, 0)): ''' same output as PIL.Image.transform ''' H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, 0], [0, 1, -offset]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def posterize_func(img, bits): ''' same output as PIL.ImageOps.posterize ''' out = np.bitwise_and(img, np.uint8(255 << (8 - bits))) return out def shear_y_func(img, factor, fill=(0, 0, 0)): H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, 0], [factor, 1, 0]]) out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8) return out def cutout_func(img, pad_size, replace=(0, 0, 0)): replace = np.array(replace, dtype=np.uint8) H, W = img.shape[0], img.shape[1] rh, rw = np.random.random(2) pad_size = pad_size // 2 ch, cw = int(rh * H), int(rw * W) x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H) y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W) out = img.copy() out[x1:x2, y1:y2, :] = replace return out ### level to args def enhance_level_to_args(MAX_LEVEL): def level_to_args(level): return ((level / MAX_LEVEL) * 1.8 + 0.1,) return level_to_args def shear_level_to_args(MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * 0.3 if np.random.random() > 0.5: level = -level return (level, replace_value) return level_to_args def translate_level_to_args(translate_const, MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * float(translate_const) if np.random.random() > 0.5: level = -level return (level, replace_value) return level_to_args def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value): def level_to_args(level): level = int((level / MAX_LEVEL) * cutout_const) return 
(level, replace_value) return level_to_args def solarize_level_to_args(MAX_LEVEL): def level_to_args(level): level = int((level / MAX_LEVEL) * 256) return (level, ) return level_to_args def none_level_to_args(level): return () def posterize_level_to_args(MAX_LEVEL): def level_to_args(level): level = int((level / MAX_LEVEL) * 4) return (level, ) return level_to_args def rotate_level_to_args(MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * 30 if np.random.random() < 0.5: level = -level return (level, replace_value) return level_to_args func_dict = { 'Identity': identity_func, 'AutoContrast': autocontrast_func, 'Equalize': equalize_func, 'Rotate': rotate_func, 'Solarize': solarize_func, 'Color': color_func, 'Contrast': contrast_func, 'Brightness': brightness_func, 'Sharpness': sharpness_func, 'ShearX': shear_x_func, 'TranslateX': translate_x_func, 'TranslateY': translate_y_func, 'Posterize': posterize_func, 'ShearY': shear_y_func, } translate_const = 10 MAX_LEVEL = 10 replace_value = (128, 128, 128) arg_dict = { 'Identity': none_level_to_args, 'AutoContrast': none_level_to_args, 'Equalize': none_level_to_args, 'Rotate': rotate_level_to_args(MAX_LEVEL, replace_value), 'Solarize': solarize_level_to_args(MAX_LEVEL), 'Color': enhance_level_to_args(MAX_LEVEL), 'Contrast': enhance_level_to_args(MAX_LEVEL), 'Brightness': enhance_level_to_args(MAX_LEVEL), 'Sharpness': enhance_level_to_args(MAX_LEVEL), 'ShearX': shear_level_to_args(MAX_LEVEL, replace_value), 'TranslateX': translate_level_to_args( translate_const, MAX_LEVEL, replace_value ), 'TranslateY': translate_level_to_args( translate_const, MAX_LEVEL, replace_value ), 'Posterize': posterize_level_to_args(MAX_LEVEL), 'ShearY': shear_level_to_args(MAX_LEVEL, replace_value), } class RandomAugment(object): def __init__(self, N=2, M=10, isPIL=False, augs=[]): self.N = N self.M = M self.isPIL = isPIL if augs: self.augs = augs else: self.augs = list(arg_dict.keys()) def get_random_ops(self): sampled_ops = np.random.choice(self.augs, self.N) return [(op, 0.5, self.M) for op in sampled_ops] def __call__(self, img): if self.isPIL: img = np.array(img) ops = self.get_random_ops() for name, prob, level in ops: if np.random.random() > prob: continue args = arg_dict[name](level) img = func_dict[name](img, *args) return img if __name__ == '__main__': a = RandomAugment() img = np.random.randn(32, 32, 3) a(img)
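# ---------------------------------------------------------------------------
# Usage note (added commentary, not part of the upstream file): the table- and
# warp-based ops above assume an HxWx3 uint8 array; with isPIL=True the PIL
# image produced by the preceding crop is converted with np.array() first,
# which is how build_transform() in datasets.py uses this class, e.g.
#
#   from torchvision import transforms
#   t = transforms.Compose([
#       transforms.RandomResizedCrop(224),
#       RandomAugment(2, 7, isPIL=True,
#                     augs=['Identity', 'AutoContrast', 'Equalize',
#                           'Brightness', 'Sharpness']),
#       transforms.ToTensor(),
#   ])
#
# The __main__ smoke test above feeds a float array, so randomly sampled ops
# that index a 256-entry lookup table may raise on it; real inputs should be
# uint8 images.
# ---------------------------------------------------------------------------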
EXA-1-master
exa/models/unilm-master/beit3/randaug.py
# -------------------------------------------------------- # Image as a Foreign Language: BEiT Pretraining for Vision and Vision-Language Tasks (https://arxiv.org/abs/2208.10442) # Github source: https://github.com/microsoft/unilm/tree/master/beit3 # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------' import datetime import io import os import math import time import json import argparse import numpy as np from pathlib import Path from collections import defaultdict, deque from timm.utils import get_state_dict import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch._six import inf from torchmetrics import Metric from tensorboardX import SummaryWriter def bool_flag(s): """ Parse boolean arguments from the command line. """ FALSY_STRINGS = {"off", "false", "0"} TRUTHY_STRINGS = {"on", "true", "1"} if s.lower() in FALSY_STRINGS: return False elif s.lower() in TRUTHY_STRINGS: return True else: raise argparse.ArgumentTypeError("invalid value for a boolean flag") class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value) class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if v is None: continue if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format( type(self).__name__, attr)) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append( "{}: {}".format(name, str(meter)) ) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = '' start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}') space_fmt = ':' + str(len(str(len(iterable)))) + 'd' log_msg = [ header, '[{0' + space_fmt + '}/{1}]', 'eta: 
{eta}', '{meters}', 'time: {time}', 'data: {data}' ] if torch.cuda.is_available(): log_msg.append('max mem: {memory:.0f}') log_msg = self.delimiter.join(log_msg) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0 or i == len(iterable) - 1: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB)) else: print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time))) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('{} Total time: {} ({:.4f} s / it)'.format( header, total_time_str, total_time / len(iterable))) class TensorboardLogger(object): def __init__(self, log_dir): self.writer = SummaryWriter(logdir=log_dir) self.step = 0 def set_step(self, step=None): if step is not None: self.step = step else: self.step += 1 def update(self, head='scalar', step=None, **kwargs): for k, v in kwargs.items(): if v is None: continue if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step) def flush(self): self.writer.flush() def _load_checkpoint_for_ema(model_ema, checkpoint): """ Workaround for ModelEma._load_checkpoint to accept an already-loaded object """ mem_file = io.BytesIO() torch.save(checkpoint, mem_file) mem_file.seek(0) model_ema._load_checkpoint(mem_file) def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs) def _get_rank_env(): if "RANK" in os.environ: return int(os.environ["RANK"]) else: return int(os.environ['OMPI_COMM_WORLD_RANK']) def _get_local_rank_env(): if "LOCAL_RANK" in os.environ: return int(os.environ["LOCAL_RANK"]) else: return int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) def _get_world_size_env(): if "WORLD_SIZE" in os.environ: return int(os.environ["WORLD_SIZE"]) else: return int(os.environ['OMPI_COMM_WORLD_SIZE']) # The implementation code is modified from DeiT (https://github.com/facebookresearch/deit.git) def init_distributed_mode(args): if args.dist_on_itp: args.rank = _get_rank_env() args.world_size = _get_world_size_env() # int(os.environ['OMPI_COMM_WORLD_SIZE']) args.gpu = _get_local_rank_env() args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) os.environ['LOCAL_RANK'] = str(args.gpu) os.environ['RANK'] = str(args.rank) os.environ['WORLD_SIZE'] = str(args.world_size) # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] elif 'RANK' in 
os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.rank % torch.cuda.device_count() else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}, gpu {}'.format( args.rank, args.dist_url, args.gpu), flush=True) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta(0, 7200) ) torch.distributed.barrier() setup_for_distributed(args.rank == 0) def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"): missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get( prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix=prefix) warn_missing_keys = [] ignore_missing_keys = [] for key in missing_keys: keep_flag = True for ignore_key in ignore_missing.split('|'): if ignore_key in key: keep_flag = False break if keep_flag: warn_missing_keys.append(key) else: ignore_missing_keys.append(key) missing_keys = warn_missing_keys if len(missing_keys) > 0: print("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: print("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(ignore_missing_keys) > 0: print("Ignored weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, ignore_missing_keys)) if len(error_msgs) > 0: print('\n'.join(error_msgs)) class NativeScalerWithGradNormCount: state_dict_key = "amp_scaler" def __init__(self): self._scaler = torch.cuda.amp.GradScaler() def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): self._scaler.scale(loss).backward(create_graph=create_graph) if update_grad: if clip_grad is not None: assert parameters is not None self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) else: self._scaler.unscale_(optimizer) norm = get_grad_norm_(parameters) self._scaler.step(optimizer) self._scaler.update() else: norm = None return norm def state_dict(self): return self._scaler.state_dict() def load_state_dict(self, state_dict): self._scaler.load_state_dict(state_dict) def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = [p for p in parameters if p.grad is not None] norm_type = float(norm_type) if len(parameters) == 0: return torch.tensor(0.) 
device = parameters[0].grad.device if norm_type == inf: total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) else: total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) return total_norm def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0, warmup_steps=-1, sched_type="cos"): warmup_schedule = np.array([]) warmup_iters = warmup_epochs * niter_per_ep if warmup_steps > 0: warmup_iters = warmup_steps print("Set warmup steps = %d" % warmup_iters) if warmup_epochs > 0: warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) if sched_type == "cos": iters = np.arange(epochs * niter_per_ep - warmup_iters) schedule = np.array([ final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters]) elif sched_type == "linear": schedule = np.linspace(base_value, final_value, epochs * niter_per_ep - warmup_iters) else: raise NotImplementedError() schedule = np.concatenate((warmup_schedule, schedule)) assert len(schedule) == epochs * niter_per_ep return schedule def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): output_dir = Path(args.output_dir) if loss_scaler is not None: checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch)] for checkpoint_path in checkpoint_paths: to_save = { 'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args, } if model_ema is not None: to_save['model_ema'] = get_state_dict(model_ema) save_on_master(to_save, checkpoint_path) else: client_state = {'epoch': epoch, "args": args} if model_ema is not None: client_state['model_ema'] = get_state_dict(model_ema) model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch, client_state=client_state) def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): output_dir = Path(args.output_dir) if loss_scaler is not None: # torch.amp if args.auto_resume and len(args.resume) == 0: import glob all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth')) latest_ckpt = -1 for ckpt in all_checkpoints: t = ckpt.split('-')[-1].split('.')[0] if t.isdigit(): latest_ckpt = max(int(t), latest_ckpt) if latest_ckpt >= 0: args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt) print("Auto resume checkpoint: %s" % args.resume) if args.resume: if args.resume.startswith('https'): checkpoint = torch.hub.load_state_dict_from_url( args.resume, map_location='cpu', check_hash=True) else: checkpoint = torch.load(args.resume, map_location='cpu') model_without_ddp.load_state_dict(checkpoint['model']) print("Resume checkpoint %s" % args.resume) if 'optimizer' in checkpoint and 'epoch' in checkpoint: optimizer.load_state_dict(checkpoint['optimizer']) args.start_epoch = checkpoint['epoch'] + 1 if hasattr(args, 'model_ema') and args.model_ema: _load_checkpoint_for_ema(model_ema, checkpoint['model_ema']) if 'scaler' in checkpoint: loss_scaler.load_state_dict(checkpoint['scaler']) print("With optim & sched!") else: # deepspeed, only support '--auto_resume'. 
if args.auto_resume: import glob all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*')) latest_ckpt = -1 for ckpt in all_checkpoints: t = ckpt.split('-')[-1].split('.')[0] if t.isdigit(): latest_ckpt = max(int(t), latest_ckpt) if latest_ckpt >= 0: args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt) print("Auto resume checkpoint: %d" % latest_ckpt) _, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt) args.start_epoch = client_states['epoch'] + 1 if model_ema is not None: if args.model_ema: _load_checkpoint_for_ema(model_ema, client_states['model_ema']) # The implementation code is modified from DeiT (https://github.com/facebookresearch/deit.git) def load_model_and_may_interpolate(ckpt_path, model, model_key, model_prefix): if ckpt_path.startswith('https'): checkpoint = torch.hub.load_state_dict_from_url( ckpt_path, map_location='cpu', check_hash=True) else: checkpoint = torch.load(ckpt_path, map_location='cpu') print("Load ckpt from %s" % ckpt_path) checkpoint_model = None for model_key in model_key.split('|'): if model_key in checkpoint: checkpoint_model = checkpoint[model_key] print("Load state_dict by model_key = %s" % model_key) break if checkpoint_model is None: checkpoint_model = checkpoint state_dict = model.state_dict() for k in ['head.weight', 'head.bias']: if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: print(f"Removing key {k} from pretrained checkpoint") del checkpoint_model[k] # interpolate position embedding for pos_embed_key in ("vision_pos_embed", "pos_embed", "beit3.encoder.embed_positions.A.weight"): if pos_embed_key in checkpoint_model: pos_embed_checkpoint = checkpoint_model[pos_embed_key] embedding_size = pos_embed_checkpoint.shape[-1] if pos_embed_key == "beit3.encoder.embed_positions.A.weight": # being consistent with Fairseq, which starts from 2 for position embedding torchscale_model = True num_patches = model.beit3.vision_embed.num_patches num_extra_tokens = model.beit3.vision_embed.num_position_embeddings() + 2 - num_patches else: torchscale_model = False num_patches = model.patch_embed.num_patches num_extra_tokens = getattr(model, pos_embed_key).shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding new_size = int(num_patches ** 0.5) # class_token and dist_token are kept unchanged if orig_size != new_size: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) if torchscale_model: extra_tokens = pos_embed_checkpoint[:num_extra_tokens].unsqueeze(0) # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[num_extra_tokens:] else: extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) if torchscale_model: new_pos_embed = new_pos_embed.squeeze(0) checkpoint_model[pos_embed_key] = new_pos_embed load_state_dict(model, checkpoint_model, prefix=model_prefix) def create_ds_config(args): args.deepspeed_config = 
os.path.join(args.output_dir, "deepspeed_config.json") with open(args.deepspeed_config, mode="w") as writer: ds_config = { "train_batch_size": args.batch_size * args.update_freq * get_world_size(), "train_micro_batch_size_per_gpu": args.batch_size, "steps_per_print": 1000, "optimizer": { "type": "Adam", "adam_w_mode": True, "params": { "lr": args.lr, "weight_decay": args.weight_decay, "bias_correction": True, "betas": [ args.opt_betas[0], args.opt_betas[1] ], "eps": args.opt_eps } }, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": getattr(args, "initial_scale_power", 12), "loss_scale_window": 1000, "hysteresis": 2, "min_loss_scale": 1 }, "amp": { "enabled": False, "opt_level": "O2" } } if args.clip_grad is not None: ds_config.update({'gradient_clipping': args.clip_grad}) if args.zero_stage == 1: ds_config.update({"zero_optimization": {"stage": args.zero_stage, "reduce_bucket_size": 5e8}}) elif args.zero_stage > 1: raise NotImplementedError() writer.write(json.dumps(ds_config, indent=2)) def merge_batch_tensors_by_dict_key(batch): batch_tensors = {} for tensor_key in batch[0]: if isinstance(batch[0][tensor_key], torch.Tensor): batch_tensors[tensor_key] = torch.stack([d[tensor_key] for d in batch]) else: batch_tensors[tensor_key] = torch.tensor([d[tensor_key] for d in batch], dtype=torch.long) return batch_tensors def get_loss_scale_for_deepspeed(model): optimizer = model.optimizer loss_scale = None if hasattr(optimizer, 'loss_scale'): loss_scale = optimizer.loss_scale elif hasattr(optimizer, 'cur_scale'): loss_scale = optimizer.cur_scale return loss_scale class GatherLayer(torch.autograd.Function): """ Gather tensors from all workers with support for backward propagation: This implementation does not cut the gradients as torch.distributed.all_gather does. 
""" @staticmethod def forward(ctx, x): output = [torch.zeros_like(x) for _ in range(dist.get_world_size())] dist.all_gather(output, x) return tuple(output) @staticmethod def backward(ctx, *grads): all_gradients = torch.stack(grads) dist.all_reduce(all_gradients) return all_gradients[dist.get_rank()] def gather_features( image_features, text_features, ): gathered_image_features = GatherLayer.apply(image_features) gathered_text_features = GatherLayer.apply(text_features) all_image_features = torch.cat(gathered_image_features) all_text_features = torch.cat(gathered_text_features) return all_image_features, all_text_features # The implementation code is modified from open_clip (https://github.com/mlfoundations/open_clip.git) class ClipLoss(nn.Module): def __init__( self, cache_labels=False, rank=0, world_size=1, ): super().__init__() self.cache_labels = cache_labels self.rank = rank self.world_size = world_size # cache state self.prev_num_logits = 0 self.labels = {} def forward(self, image_features, text_features, logit_scale): device = image_features.device if self.world_size > 1: all_image_features, all_text_features = gather_features( image_features, text_features ) logits_per_image = logit_scale * image_features @ all_text_features.T logits_per_text = logit_scale * text_features @ all_image_features.T else: logits_per_image = logit_scale * image_features @ text_features.T logits_per_text = logit_scale * text_features @ image_features.T # calculated ground-truth and cache if enabled num_logits = logits_per_image.shape[0] if self.prev_num_logits != num_logits or device not in self.labels: labels = torch.arange(num_logits, device=device, dtype=torch.long) if self.world_size > 1: labels = labels + num_logits * self.rank if self.cache_labels: self.labels[device] = labels self.prev_num_logits = num_logits else: labels = self.labels[device] total_loss = ( F.cross_entropy(logits_per_image, labels) + F.cross_entropy(logits_per_text, labels) ) / 2 return total_loss, logits_per_image, logits_per_text def write_result_to_jsonl(test_stats, result_file): with open(result_file, mode="w", encoding="utf-8") as writer: writer.write(json.dumps(test_stats, indent=None)) def read_result_from_jsonl(result_file): with open(result_file, mode="r", encoding="utf-8") as reader: return json.load(reader) # The implementation code is from ViLT (https://github.com/dandelin/ViLT.git) class VQAScore(Metric): def __init__(self, dist_sync_on_step=False): super().__init__(dist_sync_on_step=dist_sync_on_step) self.add_state("score", default=torch.tensor(0.0), dist_reduce_fx="sum") self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum") def update(self, logits, target): logits, target = ( logits.detach().float().to(self.score.device), target.detach().float().to(self.score.device), ) logits = torch.max(logits, 1)[1] one_hots = torch.zeros(*target.size()).to(target) one_hots.scatter_(1, logits.view(-1, 1), 1) scores = one_hots * target self.score += scores.sum() self.total += len(logits) def compute(self): return self.score / self.total class BertCaptioningLoss(nn.Module): def __init__(self, label_smoothing, drop_worst_ratio, drop_worst_after): super().__init__() self.label_smoothing = label_smoothing self.drop_worst_ratio = drop_worst_ratio self.drop_worst_after = drop_worst_after self.log_soft = nn.LogSoftmax(dim=1) self.kl = nn.KLDivLoss(reduction='none') self.iter = 0 def forward(self, logits, target, iter): eps = self.label_smoothing n_class = logits.size(1) one_hot = torch.zeros_like(logits).scatter(1, 
target.view(-1, 1), 1) one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1) log_prb = self.log_soft(logits) loss = self.kl(log_prb, one_hot).sum(1) if self.drop_worst_ratio > 0 and iter > self.drop_worst_after: loss, _ = torch.topk(loss, k=int(loss.shape[0] * (1-self.drop_worst_ratio)), largest=False) loss = loss.mean() return loss class BeamHypotheses(object): def __init__(self, n_hyp, max_length, length_penalty, early_stopping): """ Initialize n-best list of hypotheses. """ self.max_length = max_length - 1 # ignoring bos_token self.length_penalty = length_penalty self.early_stopping = early_stopping self.n_hyp = n_hyp self.hyp = [] self.worst_score = 1e9 def __len__(self): """ Number of hypotheses in the list. """ return len(self.hyp) def add(self, hyp, sum_logprobs): """ Add a new hypothesis to the list. """ score = sum_logprobs / len(hyp) ** self.length_penalty if len(self) < self.n_hyp or score > self.worst_score: self.hyp.append((score, hyp)) if len(self) > self.n_hyp: sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)]) del self.hyp[sorted_scores[0][1]] self.worst_score = sorted_scores[1][0] else: self.worst_score = min(score, self.worst_score) def is_done(self, best_sum_logprobs): """ If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. """ if len(self) < self.n_hyp: return False elif self.early_stopping: return True else: return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty def dump_predictions(args, result, file_suffix): global_rank = get_rank() jsons = None if global_rank >= 0: output_file = os.path.join(args.task_cache_path, f"submit_{global_rank}_{file_suffix}.json") with open(output_file, "w") as fp: json.dump(result, fp, indent=2) torch.distributed.barrier() if global_rank == 0: world_size = get_world_size() jsons = [] for i in range(world_size): each_file = os.path.join(args.task_cache_path, f"submit_{i}_{file_suffix}.json") with open(each_file, "r") as fp: jsons += json.load(fp) new_jsons = [] res_dict = dict() if args.task in ["coco_captioning", "nocaps"]: qid_key = "image_id" else: # for VQAv2 qid_key = "question_id" for item in jsons: if item[qid_key] in res_dict: continue new_jsons.append(item) res_dict[item[qid_key]] = item jsons = new_jsons torch.distributed.barrier() os.remove(output_file) else: jsons = result result_file = os.path.join(args.output_dir, f"submit_{file_suffix}.json") if jsons is not None: with open(result_file, "w") as fp: json.dump(jsons, fp, indent=2) print("Infer %d examples into %s" % (len(jsons), result_file)) return result_file # The evaluation code is from BLIP (https://github.com/salesforce/BLIP) # For nocaps, please submit the prediction file to the evaluate server (https://eval.ai/web/challenges/challenge-page/355/overview) to obtain the final results def coco_caption_eval(gt_dir, results_file, split): from pycocotools.coco import COCO from pycocoevalcap.eval import COCOEvalCap from torchvision.datasets.utils import download_url urls = {'coco_captioning_val': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json', 'coco_captioning_test': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json', 'nocaps_val': 'https://conversationhub.blob.core.windows.net/beit-share-public/beit3/nocaps/nocaps_val_gt.json'} filenames = {'coco_captioning_val':'coco_karpathy_val_gt.json', 
'coco_captioning_test':'coco_karpathy_test_gt.json', 'nocaps_val':'nocaps_val_gt.json'} download_url(urls[split], gt_dir) annotation_file = os.path.join(gt_dir, filenames[split]) # create coco object and coco_result object coco = COCO(annotation_file) coco_result = coco.loadRes(results_file) # create coco_eval object by taking coco and coco_result coco_eval = COCOEvalCap(coco, coco_result) # evaluate results # SPICE will take a few minutes the first time, but speeds up due to caching coco_eval.evaluate() res_dict = dict() for metric, score in coco_eval.eval.items(): res_dict[metric] = score return res_dict
EXA-1-master
exa/models/unilm-master/beit3/utils.py
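A minimal usage sketch for two of the helpers above, cosine_scheduler and MetricLogger. It assumes it is run from the beit3 directory so that `import utils` picks up this file; the epoch and iteration counts are illustrative, not taken from any real recipe.

import utils

# Per-iteration learning-rate schedule: 5 warmup epochs, then cosine decay to the final value.
lr_schedule = utils.cosine_scheduler(
    base_value=5e-4, final_value=1e-6,
    epochs=20, niter_per_ep=1000, warmup_epochs=5,
)
assert len(lr_schedule) == 20 * 1000  # one value per training iteration

# MetricLogger keeps windowed / global averages of whatever scalars are passed to update().
logger = utils.MetricLogger(delimiter="  ")
logger.update(loss=0.5, lr=float(lr_schedule[0]))
print(str(logger))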
# -------------------------------------------------------- # Image as a Foreign Language: BEiT Pretraining for Vision and Vision-Language Tasks (https://arxiv.org/abs/2208.10442) # Github source: https://github.com/microsoft/unilm/tree/master/beit3 # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------' import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.data.mixup import Mixup from timm.models import create_model from timm.utils import ModelEma from optim_factory import create_optimizer, get_parameter_groups, \ LayerDecayValueAssigner, get_is_head_flag_for_vit from engine_for_finetuning import train_one_epoch, get_handler, evaluate from datasets import create_downstream_dataset from utils import NativeScalerWithGradNormCount as NativeScaler import utils import modeling_finetune def get_args(): parser = argparse.ArgumentParser('BEiT fine-tuning and evaluation script for image classification', add_help=False) # Model parameters parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--task', type=str, required=True, choices=['nlvr2', 'vqav2', 'flickr30k', 'coco_retrieval', 'coco_captioning', 'nocaps', 'imagenet'], help='Name of task to fine-tuning') parser.add_argument('--input_size', default=224, type=int, help='images input size') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--checkpoint_activations', action='store_true', default=None, help='Enable checkpointing to save your memory.') parser.add_argument('--sentencepiece_model', type=str, required=True, help='Sentencepiece model path for the pretrained model.') parser.add_argument('--vocab_size', type=int, default=64010) parser.add_argument('--num_max_bpe_tokens', type=int, default=64) parser.add_argument('--model_ema', action='store_true', default=False) parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') # Optimizer parameters parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt_betas', default=[0.9, 0.999], type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: 0.9, 0.999, use opt default)') parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--lr', type=float, default=5e-4, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--layer_decay', type=float, default=0.9) parser.add_argument('--task_head_lr_weight', type=float, default=0) parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-6)') 
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0') parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--eval_batch_size', default=None, type=int) parser.add_argument('--epochs', default=20, type=int) parser.add_argument('--update_freq', default=1, type=int) parser.add_argument('--save_ckpt_freq', default=5, type=int) # Augmentation parameters parser.add_argument('--randaug', action='store_true', default=False) parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') # Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--model_key', default='model|module', type=str) parser.add_argument('--model_prefix', default='', type=str) # Dataset parameters parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path') parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving') parser.add_argument('--log_dir', default=None, help='path where to tensorboard log') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--auto_resume', action='store_true') parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') parser.set_defaults(auto_resume=True) parser.add_argument('--save_ckpt', action='store_true') parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') parser.set_defaults(save_ckpt=True) parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') # parameter for dump predictions (VQA, COCO captioning, NoCaps) parser.add_argument('--task_cache_path', default=None, type=str) # parameter for imagenet finetuning parser.add_argument('--nb_classes', default=1000, type=int, help='number of the classification types') parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, 
help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') # augmentation parameters for imagenet finetuning parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)') parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') # evaluation parameters for imagenet parser.add_argument('--crop_pct', type=float, default=None) # random Erase params for imagenet finetuning parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # parameter for captioning finetuning parser.add_argument('--captioning_mask_prob', type=float, default=0.6) parser.add_argument('--drop_worst_ratio', type=float, default=0.2) parser.add_argument('--drop_worst_after', type=int, default=12000) parser.add_argument('--num_beams', type=int, default=3) parser.add_argument('--length_penalty', type=float, default=0.6) # label smoothing for imagenet and captioning parser.add_argument('--label_smoothing', type=float, default=0.1) # deepspeed parameters parser.add_argument('--enable_deepspeed', action='store_true', default=False) parser.add_argument('--initial_scale_power', type=int, default=16) parser.add_argument('--zero_stage', default=0, type=int, help='ZeRO optimizer stage (default: 0)') known_args, _ = parser.parse_known_args() if known_args.enable_deepspeed: try: import deepspeed from deepspeed import DeepSpeedConfig parser = deepspeed.add_config_arguments(parser) ds_init = deepspeed.initialize except: print("Please 'pip install deepspeed==0.4.0'") exit(0) else: ds_init = None return parser.parse_args(), ds_init def main(args, ds_init): utils.init_distributed_mode(args) if ds_init is not None: utils.create_ds_config(args) if args.task_cache_path is None: args.task_cache_path = args.output_dir print(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) # random.seed(seed) cudnn.benchmark = True if utils.get_rank() == 0 and args.log_dir is not None: os.makedirs(args.log_dir, exist_ok=True) log_writer = utils.TensorboardLogger(log_dir=args.log_dir) else: log_writer = None data_loader_train, data_loader_val = create_downstream_dataset(args) if not args.model.endswith(args.task): if args.task in ("flickr30k", "coco_retrieval"): model_config = "%s_retrieval" % args.model elif args.task in ("coco_captioning", "nocaps"): model_config = "%s_captioning" % args.model elif args.task in ("imagenet"): model_config = "%s_imageclassification" % args.model else: model_config = "%s_%s" % (args.model, args.task) else: model_config = args.model print("model_config = %s" % model_config) model = 
create_model( model_config, pretrained=False, drop_path_rate=args.drop_path, vocab_size=args.vocab_size, checkpoint_activations=args.checkpoint_activations, ) if args.finetune: utils.load_model_and_may_interpolate(args.finetune, model, args.model_key, args.model_prefix) model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) print("Model = %s" % str(model_without_ddp)) print('number of params:', n_parameters) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(data_loader_train.dataset) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(data_loader_train.dataset)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) num_layers = model_without_ddp.get_num_layers() if args.layer_decay < 1.0: lrs = list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)) assigner = LayerDecayValueAssigner(lrs) elif args.task_head_lr_weight > 1: assigner = LayerDecayValueAssigner([1.0, args.task_head_lr_weight], scale_handler=get_is_head_flag_for_vit) else: assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) skip_weight_decay_list = model.no_weight_decay() if args.distributed: torch.distributed.barrier() if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) utils.auto_load_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) task_handler = get_handler(args) # mixup for imagenet mixup_fn = None if args.task in ["imagenet", "in1k"]: mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.label_smoothing, num_classes=args.nb_classes) if args.eval: data_loader_test = create_downstream_dataset(args, is_eval=True) if args.task in ["nlvr2", "flickr30k", "coco_retrieval", "imagenet"]: ext_test_stats, task_key = evaluate(data_loader_test, model, device, task_handler) print(f"Accuracy of the network on the {len(data_loader_test.dataset)} test images: {ext_test_stats[task_key]:.3f}%") exit(0) elif args.task == "vqav2": result, _ = evaluate(data_loader_test, model, device, task_handler) utils.dump_predictions(args, result, "vqav2_test") exit(0) elif args.task in ["coco_captioning", "nocaps"]: predictions, _ = evaluate(data_loader_test, model, device, task_handler) prediction_file = utils.dump_predictions(args, predictions, "{}_test".format(args.task)) if utils.is_main_process() and args.task == "coco_captioning": captioning_result = utils.coco_caption_eval(args.output_dir, prediction_file, "{}_test".format(args.task)) result_file = os.path.join(args.output_dir, f"{args.task}_result.json") print(json.dumps(captioning_result)) utils.write_result_to_jsonl(captioning_result, result_file) exit(0) print(f"Start training for {args.epochs} epochs") start_time = time.time() max_accuracy = 0.0 for epoch in range(args.start_epoch, args.epochs): if args.distributed: data_loader_train.sampler.set_epoch(epoch) if log_writer is not None: log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq) train_stats = train_one_epoch( model, data_loader_train, optimizer, device, task_handler, epoch, epoch * num_training_steps_per_epoch, lr_schedule_values, loss_scaler, args.clip_grad, args.update_freq, model_ema, log_writer, args.task, mixup_fn, ) if args.output_dir and args.save_ckpt: if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs: utils.save_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema) if data_loader_val is not None: if args.task not in ["coco_captioning", "nocaps"]: test_stats, task_key = evaluate(data_loader_val, model, device, task_handler) else: predictions, _ = evaluate(data_loader_val, model, device, task_handler) prediction_file = utils.dump_predictions(args, predictions, f"{args.task}_val_e{epoch}") result_file = os.path.join(args.output_dir, f"{args.task}_result_val_e{epoch}.json") task_key = "CIDEr" if utils.is_main_process(): test_stats = utils.coco_caption_eval(args.output_dir, prediction_file, "{}_val".format(args.task)) utils.write_result_to_jsonl(test_stats, result_file) torch.distributed.barrier() if not utils.is_main_process(): test_stats = utils.read_result_from_jsonl(result_file) print(f"Performance of the network on the {len(data_loader_val.dataset)} val images: {test_stats[task_key]:.1f}%") if max_accuracy < test_stats[task_key]: max_accuracy = test_stats[task_key] if args.output_dir and args.save_ckpt: utils.save_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch="best", model_ema=model_ema) print(f'Max performance: {max_accuracy:.2f}%') if log_writer is not None: log_writer.update(acc=test_stats[task_key], head="perf", step=epoch) log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 
**{f'val_{k}': v for k, v in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters} else: log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, # **{f'test_{k}': v for k, v in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters} if args.output_dir and utils.is_main_process(): if log_writer is not None: log_writer.flush() with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: f.write(json.dumps(log_stats) + "\n") total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('Training time {}'.format(total_time_str)) if __name__ == '__main__': opts, ds_init = get_args() if opts.output_dir: Path(opts.output_dir).mkdir(parents=True, exist_ok=True) main(opts, ds_init)
EXA-1-master
exa/models/unilm-master/beit3/run_beit3_finetuning.py
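The fine-tuning script above is driven entirely by argparse. Below is a hedged sketch of invoking it programmatically with placeholder paths and a minimal ImageNet evaluation setup; it assumes the beit3 dependencies (timm, torchscale, sentencepiece, etc.) are installed, and the argument values are not a validated recipe.

import sys
from pathlib import Path

import run_beit3_finetuning as finetune

# Placeholder arguments only; --task and --sentencepiece_model are required by get_args().
sys.argv = [
    "run_beit3_finetuning.py",
    "--model", "beit3_base_patch16_224",
    "--task", "imagenet",
    "--sentencepiece_model", "/path/to/beit3.spm",
    "--data_path", "/path/to/imagenet",
    "--output_dir", "./output",
    "--eval",
]
opts, ds_init = finetune.get_args()
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
# finetune.main(opts, ds_init)  # uncomment to actually run the evaluation loop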
# -------------------------------------------------------- # Image as a Foreign Language: BEiT Pretraining for Vision and Vision-Language Tasks (https://arxiv.org/abs/2208.10442) # Github source: https://github.com/microsoft/unilm/tree/master/beit3 # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------' import math import torch import torch.nn as nn from timm.models.layers import trunc_normal_ as __call_trunc_normal_ from torchscale.model.BEiT3 import BEiT3 from torchscale.architecture.config import EncoderConfig def trunc_normal_(tensor, mean=0., std=1.): __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) def _get_base_config( img_size=224, patch_size=16, drop_path_rate=0, checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs ): return EncoderConfig( img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True, layernorm_embedding=False, normalize_output=True, no_output_layer=True, drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12, encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12, checkpoint_activations=checkpoint_activations, ) def _get_large_config( img_size=224, patch_size=16, drop_path_rate=0, checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs ): return EncoderConfig( img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True, layernorm_embedding=False, normalize_output=True, no_output_layer=True, drop_path_rate=drop_path_rate, encoder_embed_dim=1024, encoder_attention_heads=16, encoder_ffn_embed_dim=int(1024 * mlp_ratio), encoder_layers=24, checkpoint_activations=checkpoint_activations, ) class BEiT3Wrapper(nn.Module): def __init__(self, args, **kwargs): super().__init__() self.args = args self.beit3 = BEiT3(args) self.apply(self._init_weights) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def get_num_layers(self): return self.beit3.encoder.num_layers @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token', 'beit3.encoder.embed_positions.A.weight', 'beit3.vision_embed.cls_token', 'logit_scale'} def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0)
EXA-1-master
exa/models/unilm-master/beit3/modeling_utils.py
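A small sketch of how the wrapper above is meant to be used: build an EncoderConfig with one of the config helpers and hand it to BEiT3Wrapper. It assumes torchscale is installed; the drop-path value is arbitrary.

from modeling_utils import BEiT3Wrapper, _get_base_config

config = _get_base_config(img_size=224, patch_size=16, drop_path_rate=0.1)
model = BEiT3Wrapper(config)

print("encoder layers:", model.get_num_layers())  # 12 for the base config
print("trainable params:", sum(p.numel() for p in model.parameters() if p.requires_grad))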
# -------------------------------------------------------- # Image as a Foreign Language: BEiT Pretraining for Vision and Vision-Language Tasks (https://arxiv.org/abs/2208.10442) # Github source: https://github.com/microsoft/unilm/tree/master/beit3 # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------' import torch import torch.nn as nn import torch.nn.functional as F from timm.models.registry import register_model import numpy as np import utils from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config class TwoLayerMLP(nn.Module): def __init__( self, in_features, hidden_features, out_features, norm_layer, norm_input=True, ): super().__init__() self.norm1 = norm_layer(in_features) if norm_input else nn.Identity() self.dense1 = nn.Linear(in_features, hidden_features) self.norm2 = norm_layer(hidden_features) self.act = nn.GELU() self.dense2 = nn.Linear(hidden_features, out_features) def forward(self, x): x = self.norm1(x) x = self.dense1(x) x = self.norm2(x) x = self.act(x) return self.dense2(x) class Pooler(nn.Module): def __init__(self, input_features, output_features, norm_layer): super().__init__() self.norm = norm_layer(input_features) self.dense = nn.Linear(input_features, output_features) self.activation = nn.Tanh() def forward(self, x): cls_rep = x[:, 0, :] cls_rep = self.norm(cls_rep) pooled_output = self.dense(cls_rep) pooled_output = self.activation(pooled_output) return pooled_output class BEiT3ForVisualReasoning(BEiT3Wrapper): def __init__( self, args, num_classes, norm_layer=nn.LayerNorm, **kwargs ): super(BEiT3ForVisualReasoning, self).__init__(args=args) embed_dim = args.encoder_embed_dim self.head = TwoLayerMLP( in_features=embed_dim * 4, hidden_features=embed_dim * 2, out_features=num_classes, norm_layer=norm_layer, ) init_scale = 0.001 self.head.apply(self._init_weights) if isinstance(self.head.dense1, nn.Linear): self.head.dense1.weight.data.mul_(init_scale) self.head.dense1.bias.data.mul_(init_scale) if isinstance(self.head.dense2, nn.Linear): self.head.dense2.weight.data.mul_(init_scale) self.head.dense2.bias.data.mul_(init_scale) def forward(self, image_a, image_b, text_description, padding_mask, **kwargs): bsz, _ = text_description.size() vision_input = torch.cat((image_a, image_b), dim=0) language_input = torch.cat((text_description, text_description), dim=0) padding_mask = torch.cat((padding_mask, padding_mask), dim=0) outputs = self.beit3( textual_tokens=language_input, visual_tokens=vision_input, text_padding_position=padding_mask, ) x = outputs["encoder_out"] multiway_split_position = outputs["multiway_split_position"] vision_cls = x[:, 0, :] language_cls = x[:, multiway_split_position, :] cls_rep = torch.cat((vision_cls, language_cls), dim=-1) a, b = torch.split(cls_rep, split_size_or_sections=[bsz, bsz], dim=0) cls_rep = torch.cat((a, b), dim=-1) return self.head(cls_rep) class BEiT3ForImageClassification(BEiT3Wrapper): def __init__( self, args, num_classes, norm_layer=nn.LayerNorm, **kwargs ): super(BEiT3ForImageClassification, self).__init__(args=args) embed_dim = args.encoder_embed_dim self.fc_norm = norm_layer(embed_dim) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.fc_norm.apply(self._init_weights) self.head.apply(self._init_weights) init_scale = 0.001 if isinstance(self.head, nn.Linear): self.head.weight.data.mul_(init_scale) self.head.bias.data.mul_(init_scale) def forward(self, image, **kwargs): x 
= self.beit3(textual_tokens=None, visual_tokens=image)["encoder_out"] t = x[:, 1:, :] cls_x = self.fc_norm(t.mean(1)) return self.head(cls_x) class BEiT3ForCaptioning(BEiT3Wrapper): def __init__( self, args, **kwargs ): super(BEiT3ForCaptioning, self).__init__(args=args) embed_dim = args.encoder_embed_dim self.mlm_head = nn.Linear(embed_dim, args.vocab_size) self.mlm_head.apply(self._init_weights) def forward(self, image, text_ids, padding_mask, language_masked_pos, text_len=None, incremental_state=None, **kwargs): text_len = text_len if text_len is not None else text_ids.size(1) image_len = self.beit3.vision_embed.num_position_embeddings() max_len = text_len + image_len uni_mask = torch.zeros((max_len, max_len), dtype=torch.long, device=text_ids.device) i_start, i_end = 0, image_len t_start, t_end = image_len, max_len # triangle mask for caption to caption uni_mask[t_start:t_end, t_start:t_end] = torch.tril(torch.ones(text_len, text_len, dtype=torch.long, device=text_ids.device)) # full attention for caption to image uni_mask[t_start:t_end, i_start:i_end] = 1 # full attention for image to image uni_mask[i_start:i_end, i_start:i_end] = 1 uni_mask = 1-uni_mask if incremental_state is not None: for idx in range(self.get_num_layers()): if idx not in incremental_state: incremental_state[idx] = {} # for incremental decoding positions = None if image is None: uni_mask = uni_mask[-2:] padding_mask = None # start position (2 (fairseq starts at 2) + cur_position) is equal to text_len positions = torch.arange(text_len, text_ids.size(1) + text_len, device=text_ids.device).long().unsqueeze(0) outputs = self.beit3( textual_tokens=text_ids, visual_tokens=image, text_padding_position=padding_mask, attn_mask=uni_mask, incremental_state=incremental_state, positions=positions, ) if image is not None: text_feats = outputs["encoder_out"][:, image_len:] else: text_feats = outputs["encoder_out"] if language_masked_pos is not None: text_feats = text_feats[language_masked_pos.bool()] return self.mlm_head(text_feats), incremental_state class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper): def __init__( self, args, num_classes, norm_layer=nn.LayerNorm, **kwargs ): super(BEiT3ForVisualQuestionAnswering, self).__init__(args=args) embed_dim = args.encoder_embed_dim self.pooler = Pooler( input_features=embed_dim, output_features=embed_dim, norm_layer=norm_layer, ) self.pooler.apply(self._init_weights) self.head = nn.Sequential( nn.Linear(embed_dim, embed_dim * 2), norm_layer(embed_dim * 2), nn.GELU(), nn.Linear(embed_dim * 2, num_classes), ) self.head.apply(self._init_weights) def forward(self, image, question, padding_mask, **kwargs): outputs = self.beit3( textual_tokens=question, visual_tokens=image, text_padding_position=padding_mask, ) x = outputs["encoder_out"] cls_rep = self.pooler(x) return self.head(cls_rep) class BEiT3ForRetrieval(BEiT3Wrapper): def __init__( self, args, **kwargs ): super(BEiT3ForRetrieval, self).__init__(args=args) embed_dim = args.encoder_embed_dim self.language_head = nn.Linear(embed_dim, embed_dim, bias=False) self.vision_head = nn.Linear(embed_dim, embed_dim, bias=False) self.language_head.apply(self._init_weights) self.vision_head.apply(self._init_weights) self.criterion = utils.ClipLoss( rank=utils.get_rank(), world_size=utils.get_world_size(), ) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) def forward(self, image=None, text_description=None, padding_mask=None, only_infer=False, **kwargs): if image is not None: outputs = self.beit3( textual_tokens=None, 
visual_tokens=image, text_padding_position=None, ) x = outputs["encoder_out"] vision_cls = self.vision_head(x[:, 0, :]) vision_cls = F.normalize(vision_cls, dim=-1) else: vision_cls = None if text_description is not None: outputs = self.beit3( textual_tokens=text_description, visual_tokens=None, text_padding_position=padding_mask, ) x = outputs["encoder_out"] language_cls = self.language_head(x[:, 0, :]) language_cls = F.normalize(language_cls, dim=-1) else: language_cls = None if only_infer: return vision_cls, language_cls else: loss, logits_per_image, logits_per_text = self.criterion( vision_cls, language_cls, self.logit_scale.exp()) return loss, vision_cls, language_cls @register_model def beit3_base_patch16_224_imageclassification(pretrained=False, **kwargs): args = _get_base_config(**kwargs) args.normalize_output = False model = BEiT3ForImageClassification(args, num_classes=1000, **kwargs) return model @register_model def beit3_large_patch16_224_imageclassification(pretrained=False, **kwargs): args = _get_large_config(**kwargs) args.normalize_output = False model = BEiT3ForImageClassification(args, num_classes=1000, **kwargs) return model @register_model def beit3_base_patch16_224_nlvr2(pretrained=False, **kwargs): args = _get_base_config(**kwargs) model = BEiT3ForVisualReasoning(args, num_classes=2, **kwargs) return model @register_model def beit3_large_patch16_224_nlvr2(pretrained=False, **kwargs): args = _get_large_config(**kwargs) model = BEiT3ForVisualReasoning(args, num_classes=2, **kwargs) return model @register_model def beit3_base_patch16_384_vqav2(pretrained=False, **kwargs): args = _get_base_config(img_size=384, **kwargs) args.normalize_output = False model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs) return model @register_model def beit3_base_patch16_480_vqav2(pretrained=False, **kwargs): args = _get_base_config(img_size=480, **kwargs) args.normalize_output = False model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs) return model @register_model def beit3_large_patch16_384_vqav2(pretrained=False, **kwargs): args = _get_large_config(img_size=384, **kwargs) args.normalize_output = False model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs) return model @register_model def beit3_large_patch16_480_vqav2(pretrained=False, **kwargs): args = _get_large_config(img_size=480, **kwargs) args.normalize_output = False model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs) return model @register_model def beit3_large_patch16_768_vqav2(pretrained=False, **kwargs): args = _get_large_config(img_size=768, **kwargs) args.normalize_output = False model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs) return model @register_model def beit3_base_patch16_224_captioning(pretrained=False, **kwargs): args = _get_base_config(**kwargs) model = BEiT3ForCaptioning(args, **kwargs) return model @register_model def beit3_base_patch16_480_captioning(pretrained=False, **kwargs): args = _get_base_config(img_size=480, **kwargs) model = BEiT3ForCaptioning(args, **kwargs) return model @register_model def beit3_large_patch16_480_captioning(pretrained=False, **kwargs): args = _get_large_config(img_size=480, **kwargs) model = BEiT3ForCaptioning(args, **kwargs) return model @register_model def beit3_base_patch16_224_retrieval(pretrained=False, **kwargs): args = _get_base_config(**kwargs) model = BEiT3ForRetrieval(args, **kwargs) return model @register_model def beit3_base_patch16_384_retrieval(pretrained=False, 
**kwargs): args = _get_base_config(img_size=384, **kwargs) model = BEiT3ForRetrieval(args, **kwargs) return model @register_model def beit3_large_patch16_384_retrieval(pretrained=False, **kwargs): args = _get_large_config(img_size=384, **kwargs) model = BEiT3ForRetrieval(args, **kwargs) return model
EXA-1-master
exa/models/unilm-master/beit3/modeling_finetune.py
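The @register_model entry points above expose each configuration through timm's registry, which is how run_beit3_finetuning.py builds models via create_model. A sketch with randomly initialized weights; the retrieval variant is used here only because its inference path needs no text input.

import torch
from timm.models import create_model

import modeling_finetune  # noqa: F401  (imported for its @register_model side effects)

model = create_model(
    "beit3_base_patch16_224_retrieval",
    pretrained=False,
    drop_path_rate=0.1,
    vocab_size=64010,
)
model.eval()

images = torch.randn(2, 3, 224, 224)
with torch.no_grad():
    vision_cls, _ = model(image=images, only_infer=True)
print(vision_cls.shape)  # torch.Size([2, 768]) for the base model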
# -------------------------------------------------------- # Image as a Foreign Language: BEiT Pretraining for Vision and Vision-Language Tasks (https://arxiv.org/abs/2208.10442) # Github source: https://github.com/microsoft/unilm/tree/master/beit3 # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # --------------------------------------------------------' from torch import optim as optim from timm.optim.lookahead import Lookahead import json def get_num_layer_for_vit(var_name, num_max_layer): if "embed" in var_name: return 0 elif var_name in ( "cls_token", "mask_token", "pos_embed", "language_pos_embed", "word_embeddings.weight", "vision_cls_token", "vision_pos_embed" ): return 0 elif var_name.startswith("patch_embed"): return 0 elif var_name.startswith("rel_pos_bias"): return num_max_layer - 1 elif "layers." in var_name: layer_id = int(var_name.split('layers.')[1].split('.')[0]) return layer_id + 1 else: return num_max_layer - 1 def get_is_head_flag_for_vit(var_name, num_max_layer): if var_name.startswith("head"): return 1 # elif var_name.startswith("pooler"): # return 1 else: return 0 class LayerDecayValueAssigner(object): def __init__(self, values, scale_handler=None): self.scale_handler = scale_handler or get_num_layer_for_vit self.values = values def get_scale(self, layer_id): return self.values[layer_id] def get_layer_id(self, var_name): return self.scale_handler(var_name, len(self.values)) # The implementation code is modified from Timm (https://github.com/huggingface/pytorch-image-models/tree/main/timm def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None): parameter_group_names = {} parameter_group_vars = {} for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: group_name = "no_decay" this_weight_decay = 0. else: group_name = "decay" this_weight_decay = weight_decay if get_num_layer is not None: layer_id = get_num_layer(name) group_name = "layer_%d_%s" % (layer_id, group_name) else: layer_id = None if group_name not in parameter_group_names: if get_layer_scale is not None: scale = get_layer_scale(layer_id) else: scale = 1. parameter_group_names[group_name] = { "weight_decay": this_weight_decay, "params": [], "lr_scale": scale } parameter_group_vars[group_name] = { "weight_decay": this_weight_decay, "params": [], "lr_scale": scale } parameter_group_vars[group_name]["params"].append(param) parameter_group_names[group_name]["params"].append(name) print("Param groups = %s" % json.dumps(parameter_group_names, indent=2)) return list(parameter_group_vars.values()) def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if skip_list is not None: skip = skip_list elif hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale) weight_decay = 0. 
else: parameters = model.parameters() opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) else: raise ValueError("Invalid optimizer") if len(opt_split) > 1: if opt_split[0] == 'lookahead': optimizer = Lookahead(optimizer) return optimizer
EXA-1-master
exa/models/unilm-master/beit3/optim_factory.py
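A sketch of the optimizer factory above on a toy model, using a plain argparse.Namespace in place of the training script's parsed arguments; all hyperparameter values are illustrative.

import argparse

import torch.nn as nn

from optim_factory import LayerDecayValueAssigner, create_optimizer

args = argparse.Namespace(opt="adamw", lr=5e-4, weight_decay=0.05,
                          opt_eps=1e-8, opt_betas=[0.9, 0.999])
model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 2))

# Layer-wise LR decay of 0.9, mirroring how run_beit3_finetuning.py builds the assigner.
num_layers = 2
assigner = LayerDecayValueAssigner(
    [0.9 ** (num_layers + 1 - i) for i in range(num_layers + 2)])

optimizer = create_optimizer(
    args, model,
    get_num_layer=assigner.get_layer_id,
    get_layer_scale=assigner.get_scale)
print(len(optimizer.param_groups))  # one decay and one no_decay group per layer id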
import re contractions = { "aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", "couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", "hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've", "he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", "Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", "maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've", "she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", "somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll", "somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", "someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've", "something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've", "there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've", "they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't", "wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're", "whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've", "whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've", "youll": "you'll", "youre": "you're", "youve": "you've", } manual_map = { "none": "0", "zero": "0", "one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10", } articles = ["a", "an", "the"] period_strip = re.compile("(?!<=\d)(\.)(?!\d)") comma_strip = re.compile("(\d)(\,)(\d)") punct = [ ";", r"/", "[", "]", '"', "{", "}", "(", ")", "=", "+", "\\", "_", "-", ">", "<", "@", "`", ",", "?", "!", ] def normalize_word(token): _token = token for p in punct: if (p + " " in token or " " + p in token) or ( re.search(comma_strip, token) != None ): _token = _token.replace(p, "") else: _token = _token.replace(p, " ") token = period_strip.sub("", _token, re.UNICODE) _token = [] temp = token.lower().split() for word in temp: word = manual_map.setdefault(word, word) if word not in articles: _token.append(word) for i, word in enumerate(_token): if word 
in contractions: _token[i] = contractions[word] token = " ".join(_token) token = token.replace(",", "") return token
EXA-1-master
exa/models/unilm-master/beit3/glossary.py
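The record above (beit3/glossary.py) implements VQA-style answer normalization: punctuation stripping, number words mapped to digits, articles dropped, and contractions restored. A minimal usage sketch follows; it assumes glossary.py is importable (e.g. the script is run from the beit3 directory), which is an assumption about packaging, not something stated in the record.
# Minimal usage sketch for normalize_word from the glossary module above.
# Assumes beit3/glossary.py is on the Python path.
from glossary import normalize_word

print(normalize_word("Isnt it ten dogs?"))  # -> "isn't it 10 dogs"
print(normalize_word("A red car."))         # -> "red car"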
#!/usr/bin/env python3 from setuptools import find_packages, setup setup( name="layoutlmv3", version="0.1", author="LayoutLM Team", url="https://github.com/microsoft/unilm/tree/master/layoutlmv3", packages=find_packages(), python_requires=">=3.7", extras_require={"dev": ["flake8", "isort", "black"]}, )
EXA-1-master
exa/models/unilm-master/layoutlmv3/setup.py
from .models import ( LayoutLMv3Config, LayoutLMv3ForTokenClassification, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3Tokenizer, )
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/__init__.py
from .layoutlmv3 import ( LayoutLMv3Config, LayoutLMv3ForTokenClassification, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3Tokenizer, )
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/models/__init__.py
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for LayoutLMv3, refer to RoBERTa.""" from transformers.models.roberta import RobertaTokenizer from transformers.utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } class LayoutLMv3Tokenizer(RobertaTokenizer): vocab_files_names = VOCAB_FILES_NAMES # pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP # max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"]
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/models/layoutlmv3/tokenization_layoutlmv3.py
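LayoutLMv3Tokenizer above only overrides RobertaTokenizer's vocab file names and model_input_names, so it behaves like RoBERTa's BPE tokenizer and does not return token_type_ids. A hedged usage sketch; the checkpoint name "microsoft/layoutlmv3-base" is taken from the pretrained config map later in this repo and assumes the Hub checkpoint ships vocab.json/merges.txt.
from layoutlmft.models.layoutlmv3.tokenization_layoutlmv3 import LayoutLMv3Tokenizer

tokenizer = LayoutLMv3Tokenizer.from_pretrained("microsoft/layoutlmv3-base")
encoding = tokenizer("Invoice total: 1,280.00", return_tensors="pt")
print(list(encoding.keys()))  # ['input_ids', 'attention_mask'] -- no token_type_ids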
from transformers import AutoConfig, AutoModel, AutoModelForTokenClassification, \ AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, RobertaConverter from .configuration_layoutlmv3 import LayoutLMv3Config from .modeling_layoutlmv3 import ( LayoutLMv3ForTokenClassification, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3Model, ) from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast AutoConfig.register("layoutlmv3", LayoutLMv3Config) AutoModel.register(LayoutLMv3Config, LayoutLMv3Model) AutoModelForTokenClassification.register(LayoutLMv3Config, LayoutLMv3ForTokenClassification) AutoModelForQuestionAnswering.register(LayoutLMv3Config, LayoutLMv3ForQuestionAnswering) AutoModelForSequenceClassification.register(LayoutLMv3Config, LayoutLMv3ForSequenceClassification) AutoTokenizer.register( LayoutLMv3Config, slow_tokenizer_class=LayoutLMv3Tokenizer, fast_tokenizer_class=LayoutLMv3TokenizerFast ) SLOW_TO_FAST_CONVERTERS.update({"LayoutLMv3Tokenizer": RobertaConverter})
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/models/layoutlmv3/__init__.py
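Because the __init__ above registers LayoutLMv3 with the Auto classes and the slow-to-fast converters, importing the package should be enough for the generic transformers entry points to resolve model_type == "layoutlmv3". The sketch below is an assumption-laden illustration: the checkpoint name is assumed, and it presumes a transformers version that does not already ship its own LayoutLMv3 (otherwise the register calls would conflict and AutoTokenizer resolution may differ).
import layoutlmft  # noqa: F401  (side effect: the Auto* registrations above run on import)
from transformers import AutoConfig, AutoModelForTokenClassification, AutoTokenizer

config = AutoConfig.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)
tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlmv3-base")
model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", config=config)
print(type(config).__name__, type(tokenizer).__name__, type(model).__name__)
# expected: LayoutLMv3Config LayoutLMv3TokenizerFast LayoutLMv3ForTokenClassification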
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization classes for LayoutLMv3, refer to RoBERTa.""" from transformers.models.roberta.tokenization_roberta_fast import RobertaTokenizerFast from transformers.utils import logging from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} class LayoutLMv3TokenizerFast(RobertaTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES # pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP # max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = LayoutLMv3Tokenizer
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/models/layoutlmv3/tokenization_layoutlmv3_fast.py
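The fast tokenizer above is what fine-tuning code typically uses, because its word_ids() mapping makes it easy to repeat one bounding box per sub-word piece (the XFUND dataset loader further down does this alignment manually while tokenizing line by line). A sketch with an assumed checkpoint name and illustrative boxes; add_prefix_space=True is required by the RoBERTa fast tokenizer when passing pre-split words.
from layoutlmft.models.layoutlmv3.tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base", add_prefix_space=True)
words = ["Invoice", "No.", "1280"]
boxes = [[80, 40, 210, 60], [220, 40, 260, 60], [270, 40, 330, 60]]  # illustrative 0-1000 boxes

enc = tokenizer(words, is_split_into_words=True)
# one box per token; special tokens (word_id None) get a dummy box
token_boxes = [boxes[i] if i is not None else [0, 0, 0, 0] for i in enc.word_ids()]
print(len(enc["input_ids"]) == len(token_boxes))  # True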
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LayoutLMv3 model. """ import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import apply_chunking_to_forward from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, ) from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from transformers.models.roberta.modeling_roberta import ( RobertaIntermediate, RobertaLMHead, RobertaOutput, RobertaSelfOutput, ) from transformers.utils import logging from .configuration_layoutlmv3 import LayoutLMv3Config from timm.models.layers import to_2tuple logger = logging.get_logger(__name__) class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) # The following variables are used in detection mycheckpointer.py self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.num_patches_w = self.patch_shape[0] self.num_patches_h = self.patch_shape[1] def forward(self, x, position_embedding=None): x = self.proj(x) if position_embedding is not None: # interpolate the position embedding to the corresponding size position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3, 1, 2) Hp, Wp = x.shape[2], x.shape[3] position_embedding = F.interpolate(position_embedding, size=(Hp, Wp), mode='bicubic') x = x + position_embedding x = x.flatten(2).transpose(1, 2) return x class LayoutLMv3Embeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) def _calc_spatial_position_embeddings(self, bbox): try: assert torch.all(0 <= bbox) and torch.all(bbox <= 1023) left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(torch.clip(bbox[:, :, 3] - bbox[:, :, 1], 0, 1023)) w_position_embeddings = self.w_position_embeddings(torch.clip(bbox[:, :, 2] - bbox[:, :, 0], 0, 1023)) # below is the difference between LayoutLMEmbeddingsV2 (torch.cat) and LayoutLMEmbeddingsV1 (add) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) return spatial_position_embeddings def create_position_ids_from_input_ids(self, input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx def forward( self, input_ids=None, bbox=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. 
position_ids = self.create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings spatial_position_embeddings = self._calc_spatial_position_embeddings(bbox) embeddings = embeddings + spatial_position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LayoutLMv3PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LayoutLMv3Config base_model_prefix = "layoutlmv3" # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class LayoutLMv3SelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def cogview_attn(self,
attention_scores, alpha=32): ''' https://arxiv.org/pdf/2105.13290.pdf Section 2.4 Stabilization of training: Precision Bottleneck Relaxation (PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores) Seems the new attention_probs will result in a slower speed and a little bias Can use torch.allclose(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison The smaller atol (e.g., 1e-08), the better. ''' scaled_attention_scores = attention_scores / alpha max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1) # max_value = scaled_attention_scores.amax(dim=(-2, -1)).unsqueeze(-1).unsqueeze(-1) new_attention_scores = (scaled_attention_scores - max_value) * alpha return nn.Softmax(dim=-1)(new_attention_scores) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # The attention scores QT K/√d could be significantly larger than input elements, and result in overflow. # Changing the computational order into QT(K/√d) alleviates the problem. (https://arxiv.org/pdf/2105.13290.pdf) attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2)) if self.has_relative_attention_bias and self.has_spatial_attention_bias: attention_scores += (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size) elif self.has_relative_attention_bias: attention_scores += rel_pos / math.sqrt(self.attention_head_size) # if self.has_relative_attention_bias: # attention_scores += rel_pos # if self.has_spatial_attention_bias: # attention_scores += rel_2d_pos # attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
# attention_probs = nn.Softmax(dim=-1)(attention_scores) # comment the line below and use this line for speedup attention_probs = self.cogview_attn(attention_scores) # to stablize training # assert torch.allclose(attention_probs, nn.Softmax(dim=-1)(attention_scores), atol=1e-8) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class LayoutLMv3Attention(nn.Module): def __init__(self, config): super().__init__() self.self = LayoutLMv3SelfAttention(config) self.output = RobertaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class LayoutLMv3Layer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LayoutLMv3Attention(config) assert not config.is_decoder and not config.add_cross_attention, \ "This version do not support decoder. Please refer to RoBERTa for implementation of is_decoder." 
self.intermediate = RobertaIntermediate(config) self.output = RobertaOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class LayoutLMv3Encoder(nn.Module): def __init__(self, config, detection=False, out_features=None): super().__init__() self.config = config self.detection = detection self.layer = nn.ModuleList([LayoutLMv3Layer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if self.has_relative_attention_bias: self.rel_pos_bins = config.rel_pos_bins self.max_rel_pos = config.max_rel_pos self.rel_pos_onehot_size = config.rel_pos_bins self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False) if self.has_spatial_attention_bias: self.max_rel_2d_pos = config.max_rel_2d_pos self.rel_2d_pos_bins = config.rel_2d_pos_bins self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False) self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False) if self.detection: self.gradient_checkpointing = True embed_dim = self.config.hidden_size self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), # nn.SyncBatchNorm(embed_dim), nn.BatchNorm2d(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) self.ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128): ret = 0 if bidirectional: num_buckets //= 2 ret += (relative_position > 0).long() * num_buckets n = torch.abs(relative_position) else: n = torch.max(-relative_position, torch.zeros_like(relative_position)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / 
max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret def _cal_1d_pos_emb(self, hidden_states, position_ids, valid_span): VISUAL_NUM = 196 + 1 rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1) if valid_span is not None: # for the text part, if two words are not in the same line, # set their distance to the max value (position_ids.shape[-1]) rel_pos_mat[(rel_pos_mat > 0) & (valid_span == False)] = position_ids.shape[1] rel_pos_mat[(rel_pos_mat < 0) & (valid_span == False)] = -position_ids.shape[1] # image-text, minimum distance rel_pos_mat[:, -VISUAL_NUM:, :-VISUAL_NUM] = 0 rel_pos_mat[:, :-VISUAL_NUM, -VISUAL_NUM:] = 0 rel_pos = self.relative_position_bucket( rel_pos_mat, num_buckets=self.rel_pos_bins, max_distance=self.max_rel_pos, ) rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states) rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2) rel_pos = rel_pos.contiguous() return rel_pos def _cal_2d_pos_emb(self, hidden_states, bbox): position_coord_x = bbox[:, :, 0] position_coord_y = bbox[:, :, 3] rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1) rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1) rel_pos_x = self.relative_position_bucket( rel_pos_x_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_y = self.relative_position_bucket( rel_pos_y_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states) rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states) rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2) rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2) rel_pos_x = rel_pos_x.contiguous() rel_pos_y = rel_pos_y.contiguous() rel_2d_pos = rel_pos_x + rel_pos_y return rel_2d_pos def forward( self, hidden_states, bbox=None, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, position_ids=None, Hp=None, Wp=None, valid_span=None, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids, valid_span) if self.has_relative_attention_bias else None rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None if self.detection: feat_out = {} j = 0 for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) # return module(*inputs, past_key_value, output_attentions, rel_pos, rel_2d_pos) # The above line will cause error: # RuntimeError: Trying to backward through the graph a second time # (or directly access saved tensors after they have already been freed). return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos, rel_2d_pos ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if self.detection and i in self.out_indices: xp = hidden_states[:, -Hp*Wp:, :].permute(0, 2, 1).reshape(len(hidden_states), -1, Hp, Wp) feat_out[self.out_features[j]] = self.ops[j](xp.contiguous()) j += 1 if self.detection: return feat_out if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class LayoutLMv3Model(LayoutLMv3PreTrainedModel): """ """ _keys_to_ignore_on_load_missing = [r"position_ids"] # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta def __init__(self, config, detection=False, out_features=None, image_only=False): super().__init__(config) self.config = config assert not config.is_decoder and not config.add_cross_attention, \ "This version do not support decoder. Please refer to RoBERTa for implementation of is_decoder." self.detection = detection if not self.detection: self.image_only = False else: assert config.visual_embed self.image_only = image_only if not self.image_only: self.embeddings = LayoutLMv3Embeddings(config) self.encoder = LayoutLMv3Encoder(config, detection=detection, out_features=out_features) if config.visual_embed: embed_dim = self.config.hidden_size # use the default pre-training parameters for fine-tuning (e.g., input_size) # when the input_size is larger in fine-tuning, we will interpolate the position embedding in forward self.patch_embed = PatchEmbed(embed_dim=embed_dim) patch_size = 16 size = int(self.config.input_size / patch_size) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, embed_dim)) self.pos_drop = nn.Dropout(p=0.) 
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: self._init_visual_bbox(img_size=(size, size)) from functools import partial norm_layer = partial(nn.LayerNorm, eps=1e-6) self.norm = norm_layer(embed_dim) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _init_visual_bbox(self, img_size=(14, 14), max_len=1000): visual_bbox_x = torch.div(torch.arange(0, max_len * (img_size[1] + 1), max_len), img_size[1], rounding_mode='trunc') visual_bbox_y = torch.div(torch.arange(0, max_len * (img_size[0] + 1), max_len), img_size[0], rounding_mode='trunc') visual_bbox = torch.stack( [ visual_bbox_x[:-1].repeat(img_size[0], 1), visual_bbox_y[:-1].repeat(img_size[1], 1).transpose(0, 1), visual_bbox_x[1:].repeat(img_size[0], 1), visual_bbox_y[1:].repeat(img_size[1], 1).transpose(0, 1), ], dim=-1, ).view(-1, 4) cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]]) self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0) def _calc_visual_bbox(self, device, dtype, bsz): # , img_size=(14, 14), max_len=1000): visual_bbox = self.visual_bbox.repeat(bsz, 1, 1) visual_bbox = visual_bbox.to(device).type(dtype) return visual_bbox def forward_image(self, x): if self.detection: x = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None) else: x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.pos_embed is not None and self.detection: cls_tokens = cls_tokens + self.pos_embed[:, :1, :] x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None and not self.detection: x = x + self.pos_embed x = self.pos_drop(x) x = self.norm(x) return x # Copied from transformers.models.bert.modeling_bert.BertModel.forward def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, valid_span=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, images=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = False # if input_ids is not None and inputs_embeds is not None: # raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") if input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape device = inputs_embeds.device elif images is not None: batch_size = len(images) device = images.device else: raise ValueError("You have to specify either input_ids or inputs_embeds or images") if not self.image_only: # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
# extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if not self.image_only: if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) final_bbox = final_position_ids = None Hp = Wp = None if images is not None: patch_size = 16 Hp, Wp = int(images.shape[2] / patch_size), int(images.shape[3] / patch_size) visual_emb = self.forward_image(images) if self.detection: visual_attention_mask = torch.ones((batch_size, visual_emb.shape[1]), dtype=torch.long, device=device) if self.image_only: attention_mask = visual_attention_mask else: attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1) elif self.image_only: attention_mask = torch.ones((batch_size, visual_emb.shape[1]), dtype=torch.long, device=device) if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: if self.config.has_spatial_attention_bias: visual_bbox = self._calc_visual_bbox(device, dtype=torch.long, bsz=batch_size) if self.image_only: final_bbox = visual_bbox else: final_bbox = torch.cat([bbox, visual_bbox], dim=1) visual_position_ids = torch.arange(0, visual_emb.shape[1], dtype=torch.long, device=device).repeat( batch_size, 1) if self.image_only: final_position_ids = visual_position_ids else: position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0) position_ids = position_ids.expand_as(input_ids) final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1) if self.image_only: embedding_output = visual_emb else: embedding_output = torch.cat([embedding_output, visual_emb], dim=1) embedding_output = self.LayerNorm(embedding_output) embedding_output = self.dropout(embedding_output) elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: if self.config.has_spatial_attention_bias: final_bbox = bbox if self.config.has_relative_attention_bias: position_ids = self.embeddings.position_ids[:, :input_shape[1]] position_ids = position_ids.expand_as(input_ids) final_position_ids = position_ids extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, None, device) encoder_outputs = self.encoder( embedding_output, bbox=final_bbox, position_ids=final_position_ids, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, Hp=Hp, Wp=Wp, valid_span=valid_span, ) if self.detection: return encoder_outputs sequence_output = encoder_outputs[0] pooled_output = None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, 
pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class LayoutLMv3ClassificationHead(nn.Module): """ Head for sentence-level classification tasks. Reference: RobertaClassificationHead """ def __init__(self, config, pool_feature=False): super().__init__() self.pool_feature = pool_feature if pool_feature: self.dense = nn.Linear(config.hidden_size*3, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, x): # x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv3 = LayoutLMv3Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) if config.num_labels < 10: self.classifier = nn.Linear(config.hidden_size, config.num_labels) else: self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, valid_span=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, images=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, images=images, valid_span=valid_span, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv3 = LayoutLMv3Model(config) # self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False) self.init_weights() def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, valid_span=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, bbox=None, images=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, images=images, valid_span=valid_span, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.layoutlmv3 = LayoutLMv3Model(config) self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False) self.init_weights() def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, valid_span=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, bbox=None, images=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, images=images, valid_span=valid_span, ) sequence_output = outputs[0][:, 0, :] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/models/layoutlmv3/modeling_layoutlmv3.py
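The self-attention in the modeling file above replaces the plain softmax with cogview_attn, the PB-Relax trick from CogView: scores are divided by alpha, shifted by their per-row maximum, and rescaled before the softmax. Because softmax is invariant to a constant shift per row, this is mathematically a no-op, but it keeps intermediate values small enough to avoid half-precision overflow. Below is a self-contained sketch of that equivalence, re-implemented for illustration rather than imported from the file above.
import torch
import torch.nn as nn

def cogview_attn(attention_scores, alpha=32):
    # PB-Relax: softmax((x / alpha - rowmax(x / alpha)) * alpha) == softmax(x - rowmax(x))
    scaled = attention_scores / alpha
    max_value = scaled.amax(dim=-1, keepdim=True)
    return nn.Softmax(dim=-1)((scaled - max_value) * alpha)

scores = torch.randn(2, 12, 16, 16) * 30.0  # (batch, heads, seq, seq) with large magnitudes
standard = nn.Softmax(dim=-1)(scores)
stabilized = cogview_attn(scores)
print(torch.allclose(standard, stabilized, atol=1e-5))  # True up to floating-point error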
# coding=utf-8 from transformers.models.bert.configuration_bert import BertConfig from transformers.utils import logging logger = logging.get_logger(__name__) LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = { "layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json", "layoutlmv3-large": "https://huggingface.co/microsoft/layoutlmv3-large/resolve/main/config.json", # See all LayoutLMv3 models at https://huggingface.co/models?filter=layoutlmv3 } class LayoutLMv3Config(BertConfig): model_type = "layoutlmv3" def __init__( self, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=None, shape_size=None, has_relative_attention_bias=False, rel_pos_bins=32, max_rel_pos=128, has_spatial_attention_bias=False, rel_2d_pos_bins=64, max_rel_2d_pos=256, visual_embed=True, mim=False, wpa_task=False, discrete_vae_weight_path='', discrete_vae_type='dall-e', input_size=224, second_input_size=112, device='cuda', **kwargs ): """Constructs RobertaConfig.""" super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.max_2d_position_embeddings = max_2d_position_embeddings self.coordinate_size = coordinate_size self.shape_size = shape_size self.has_relative_attention_bias = has_relative_attention_bias self.rel_pos_bins = rel_pos_bins self.max_rel_pos = max_rel_pos self.has_spatial_attention_bias = has_spatial_attention_bias self.rel_2d_pos_bins = rel_2d_pos_bins self.max_rel_2d_pos = max_rel_2d_pos self.visual_embed = visual_embed self.mim = mim self.wpa_task = wpa_task self.discrete_vae_weight_path = discrete_vae_weight_path self.discrete_vae_type = discrete_vae_type self.input_size = input_size self.second_input_size = second_input_size self.device = device
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/models/layoutlmv3/configuration_layoutlmv3.py
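LayoutLMv3Config above leaves coordinate_size and shape_size as None, but the embedding code in modeling_layoutlmv3.py concatenates four coordinate embeddings and two shape embeddings and adds the result to the hidden states, so a usable config needs 4 * coordinate_size + 2 * shape_size == hidden_size. An illustrative construction follows; the concrete values are assumptions, not necessarily the published checkpoint's settings.
from layoutlmft.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3Config

config = LayoutLMv3Config(
    vocab_size=50265,                 # RoBERTa-style BPE vocab (assumed)
    hidden_size=768,
    coordinate_size=128,              # 4 * 128 ...
    shape_size=128,                   # ... + 2 * 128 == 768 == hidden_size
    max_2d_position_embeddings=1024,  # bbox coordinates must stay in [0, 1023]
    has_relative_attention_bias=True,
    has_spatial_attention_bias=True,
    visual_embed=True,
    input_size=224,
)
print(config.model_type)  # layoutlmv3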
import torchvision.transforms.functional as F import warnings import math import random import numpy as np from PIL import Image import torch from detectron2.data.detection_utils import read_image from detectron2.data.transforms import ResizeTransform, TransformList def normalize_bbox(bbox, size): return [ int(1000 * bbox[0] / size[0]), int(1000 * bbox[1] / size[1]), int(1000 * bbox[2] / size[0]), int(1000 * bbox[3] / size[1]), ] def load_image(image_path): image = read_image(image_path, format="BGR") h = image.shape[0] w = image.shape[1] img_trans = TransformList([ResizeTransform(h=h, w=w, new_h=224, new_w=224)]) image = torch.tensor(img_trans.apply_image(image).copy()).permute(2, 0, 1) # copy to make it writeable return image, (w, h) def crop(image, i, j, h, w, boxes=None): cropped_image = F.crop(image, i, j, h, w) if boxes is not None: # Currently we cannot use this case since when some boxes is out of the cropped image, # it may be better to drop out these boxes along with their text input (instead of min or clamp) # which haven't been implemented here max_size = torch.as_tensor([w, h], dtype=torch.float32) cropped_boxes = torch.as_tensor(boxes) - torch.as_tensor([j, i, j, i]) cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) cropped_boxes = cropped_boxes.clamp(min=0) boxes = cropped_boxes.reshape(-1, 4) return cropped_image, boxes def resize(image, size, interpolation, boxes=None): # It seems that we do not need to resize boxes here, since the boxes will be resized to 1000x1000 finally, # which is compatible with a square image size of 224x224 rescaled_image = F.resize(image, size, interpolation) if boxes is None: return rescaled_image, None ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) ratio_width, ratio_height = ratios # boxes = boxes.copy() scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) return rescaled_image, scaled_boxes def clamp(num, min_value, max_value): return max(min(num, max_value), min_value) def get_bb(bb, page_size): bbs = [float(j) for j in bb] xs, ys = [], [] for i, b in enumerate(bbs): if i % 2 == 0: xs.append(b) else: ys.append(b) (width, height) = page_size return_bb = [ clamp(min(xs), 0, width - 1), clamp(min(ys), 0, height - 1), clamp(max(xs), 0, width - 1), clamp(max(ys), 0, height - 1), ] return_bb = [ int(1000 * return_bb[0] / width), int(1000 * return_bb[1] / height), int(1000 * return_bb[2] / width), int(1000 * return_bb[3] / height), ] return return_bb class ToNumpy: def __call__(self, pil_img): np_img = np.array(pil_img, dtype=np.uint8) if np_img.ndim < 3: np_img = np.expand_dims(np_img, axis=-1) np_img = np.rollaxis(np_img, 2) # HWC to CHW return np_img class ToTensor: def __init__(self, dtype=torch.float32): self.dtype = dtype def __call__(self, pil_img): np_img = np.array(pil_img, dtype=np.uint8) if np_img.ndim < 3: np_img = np.expand_dims(np_img, axis=-1) np_img = np.rollaxis(np_img, 2) # HWC to CHW return torch.from_numpy(np_img).to(dtype=self.dtype) _pil_interpolation_to_str = { F.InterpolationMode.NEAREST: 'F.InterpolationMode.NEAREST', F.InterpolationMode.BILINEAR: 'F.InterpolationMode.BILINEAR', F.InterpolationMode.BICUBIC: 'F.InterpolationMode.BICUBIC', F.InterpolationMode.LANCZOS: 'F.InterpolationMode.LANCZOS', F.InterpolationMode.HAMMING: 'F.InterpolationMode.HAMMING', F.InterpolationMode.BOX: 'F.InterpolationMode.BOX', } def _pil_interp(method): if method == 'bicubic': return F.InterpolationMode.BICUBIC elif method == 'lanczos': 
return F.InterpolationMode.LANCZOS elif method == 'hamming': return F.InterpolationMode.HAMMING else: # default bilinear, do we want to allow nearest? return F.InterpolationMode.BILINEAR class Compose: """Composes several transforms together. This transform does not support torchscript. Please, see the note below. Args: transforms (list of ``Transform`` objects): list of transforms to compose. Example: >>> transforms.Compose([ >>> transforms.CenterCrop(10), >>> transforms.PILToTensor(), >>> transforms.ConvertImageDtype(torch.float), >>> ]) .. note:: In order to script the transformations, please use ``torch.nn.Sequential`` as below. >>> transforms = torch.nn.Sequential( >>> transforms.CenterCrop(10), >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), >>> ) >>> scripted_transforms = torch.jit.script(transforms) Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require `lambda` functions or ``PIL.Image``. """ def __init__(self, transforms): self.transforms = transforms def __call__(self, img, augmentation=False, box=None): for t in self.transforms: img = t(img, augmentation, box) return img class RandomResizedCropAndInterpolationWithTwoPic: """Crop the given PIL Image to random size and aspect ratio with random interpolation. A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to given size. This is popularly used to train the Inception networks. Args: size: expected output size of each edge scale: range of size of the origin size cropped ratio: range of aspect ratio of the origin aspect ratio cropped interpolation: Default: PIL.Image.BILINEAR """ def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='bilinear', second_interpolation='lanczos'): if isinstance(size, tuple): self.size = size else: self.size = (size, size) if second_size is not None: if isinstance(second_size, tuple): self.second_size = second_size else: self.second_size = (second_size, second_size) else: self.second_size = None if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): warnings.warn("range should be of kind (min, max)") self.interpolation = _pil_interp(interpolation) self.second_interpolation = _pil_interp(second_interpolation) self.scale = scale self.ratio = ratio @staticmethod def get_params(img, scale, ratio): """Get parameters for ``crop`` for a random sized crop. Args: img (PIL Image): Image to be cropped. scale (tuple): range of size of the origin size cropped ratio (tuple): range of aspect ratio of the origin aspect ratio cropped Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for a random sized crop. 
""" area = img.size[0] * img.size[1] for attempt in range(10): target_area = random.uniform(*scale) * area log_ratio = (math.log(ratio[0]), math.log(ratio[1])) aspect_ratio = math.exp(random.uniform(*log_ratio)) w = int(round(math.sqrt(target_area * aspect_ratio))) h = int(round(math.sqrt(target_area / aspect_ratio))) if w <= img.size[0] and h <= img.size[1]: i = random.randint(0, img.size[1] - h) j = random.randint(0, img.size[0] - w) return i, j, h, w # Fallback to central crop in_ratio = img.size[0] / img.size[1] if in_ratio < min(ratio): w = img.size[0] h = int(round(w / min(ratio))) elif in_ratio > max(ratio): h = img.size[1] w = int(round(h * max(ratio))) else: # whole image w = img.size[0] h = img.size[1] i = (img.size[1] - h) // 2 j = (img.size[0] - w) // 2 return i, j, h, w def __call__(self, img, augmentation=False, box=None): """ Args: img (PIL Image): Image to be cropped and resized. Returns: PIL Image: Randomly cropped and resized image. """ if augmentation: i, j, h, w = self.get_params(img, self.scale, self.ratio) img = F.crop(img, i, j, h, w) # img, box = crop(img, i, j, h, w, box) img = F.resize(img, self.size, self.interpolation) second_img = F.resize(img, self.second_size, self.second_interpolation) \ if self.second_size is not None else None return img, second_img def __repr__(self): if isinstance(self.interpolation, (tuple, list)): interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation]) else: interpolate_str = _pil_interpolation_to_str[self.interpolation] format_string = self.__class__.__name__ + '(size={0}'.format(self.size) format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) format_string += ', interpolation={0}'.format(interpolate_str) if self.second_size is not None: format_string += ', second_size={0}'.format(self.second_size) format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation]) format_string += ')' return format_string def pil_loader(path: str) -> Image.Image: # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) with open(path, 'rb') as f: img = Image.open(f) return img.convert('RGB')
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/data/image_utils.py
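A minimal usage sketch of the transforms defined in image_utils.py above, mirroring how the fine-tuning code later in this dump builds the visual patch tensor. The image path is a hypothetical assumption, and the 0.5 normalization constants are taken from the XFUND loader further below, not from image_utils.py itself.

# Hedged sketch (not part of the original file): resize a page image and normalize it into a patch tensor.
import torch
from torchvision import transforms
from layoutlmft.data.image_utils import Compose, RandomResizedCropAndInterpolationWithTwoPic, pil_loader

common_transform = Compose([
    RandomResizedCropAndInterpolationWithTwoPic(size=224, interpolation='bicubic'),
])
patch_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=torch.tensor((0.5, 0.5, 0.5)), std=torch.tensor((0.5, 0.5, 0.5))),
])

img = pil_loader("page.png")                                 # hypothetical document image
for_patches, _ = common_transform(img, augmentation=False)   # no random crop, only the resize
patch = patch_transform(for_patches)                         # float tensor of shape (3, 224, 224)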
import os import json import torch from torch.utils.data.dataset import Dataset from torchvision import transforms from PIL import Image from layoutlmft.data.image_utils import Compose, RandomResizedCropAndInterpolationWithTwoPic XFund_label2ids = { "O":0, 'B-HEADER':1, 'I-HEADER':2, 'B-QUESTION':3, 'I-QUESTION':4, 'B-ANSWER':5, 'I-ANSWER':6, } class xfund_dataset(Dataset): def box_norm(self, box, width, height): def clip(min_num, num, max_num): return min(max(num, min_num), max_num) x0, y0, x1, y1 = box x0 = clip(0, int((x0 / width) * 1000), 1000) y0 = clip(0, int((y0 / height) * 1000), 1000) x1 = clip(0, int((x1 / width) * 1000), 1000) y1 = clip(0, int((y1 / height) * 1000), 1000) assert x1 >= x0 assert y1 >= y0 return [x0, y0, x1, y1] def get_segment_ids(self, bboxs): segment_ids = [] for i in range(len(bboxs)): if i == 0: segment_ids.append(0) else: if bboxs[i - 1] == bboxs[i]: segment_ids.append(segment_ids[-1]) else: segment_ids.append(segment_ids[-1] + 1) return segment_ids def get_position_ids(self, segment_ids): position_ids = [] for i in range(len(segment_ids)): if i == 0: position_ids.append(2) else: if segment_ids[i] == segment_ids[i - 1]: position_ids.append(position_ids[-1] + 1) else: position_ids.append(2) return position_ids def load_data( self, data_file, ): # re-org data format total_data = {"id": [], "lines": [], "bboxes": [], "ner_tags": [], "image_path": []} for i in range(len(data_file['documents'])): width, height = data_file['documents'][i]['img']['width'], data_file['documents'][i]['img'][ 'height'] cur_doc_lines, cur_doc_bboxes, cur_doc_ner_tags, cur_doc_image_path = [], [], [], [] for j in range(len(data_file['documents'][i]['document'])): cur_item = data_file['documents'][i]['document'][j] cur_doc_lines.append(cur_item['text']) cur_doc_bboxes.append(self.box_norm(cur_item['box'], width=width, height=height)) cur_doc_ner_tags.append(cur_item['label']) total_data['id'] += [len(total_data['id'])] total_data['lines'] += [cur_doc_lines] total_data['bboxes'] += [cur_doc_bboxes] total_data['ner_tags'] += [cur_doc_ner_tags] total_data['image_path'] += [data_file['documents'][i]['img']['fname']] # tokenize text and get bbox/label total_input_ids, total_bboxs, total_label_ids = [], [], [] for i in range(len(total_data['lines'])): cur_doc_input_ids, cur_doc_bboxs, cur_doc_labels = [], [], [] for j in range(len(total_data['lines'][i])): cur_input_ids = self.tokenizer(total_data['lines'][i][j], truncation=False, add_special_tokens=False, return_attention_mask=False)['input_ids'] if len(cur_input_ids) == 0: continue cur_label = total_data['ner_tags'][i][j].upper() if cur_label == 'OTHER': cur_labels = ["O"] * len(cur_input_ids) for k in range(len(cur_labels)): cur_labels[k] = self.label2ids[cur_labels[k]] else: cur_labels = [cur_label] * len(cur_input_ids) cur_labels[0] = self.label2ids['B-' + cur_labels[0]] for k in range(1, len(cur_labels)): cur_labels[k] = self.label2ids['I-' + cur_labels[k]] assert len(cur_input_ids) == len([total_data['bboxes'][i][j]] * len(cur_input_ids)) == len(cur_labels) cur_doc_input_ids += cur_input_ids cur_doc_bboxs += [total_data['bboxes'][i][j]] * len(cur_input_ids) cur_doc_labels += cur_labels assert len(cur_doc_input_ids) == len(cur_doc_bboxs) == len(cur_doc_labels) assert len(cur_doc_input_ids) > 0 total_input_ids.append(cur_doc_input_ids) total_bboxs.append(cur_doc_bboxs) total_label_ids.append(cur_doc_labels) assert len(total_input_ids) == len(total_bboxs) == len(total_label_ids) # split text to several slices because of over-length 
input_ids, bboxs, labels = [], [], [] segment_ids, position_ids = [], [] image_path = [] for i in range(len(total_input_ids)): start = 0 cur_iter = 0 while start < len(total_input_ids[i]): end = min(start + 510, len(total_input_ids[i])) input_ids.append([self.tokenizer.cls_token_id] + total_input_ids[i][start: end] + [self.tokenizer.sep_token_id]) bboxs.append([[0, 0, 0, 0]] + total_bboxs[i][start: end] + [[1000, 1000, 1000, 1000]]) labels.append([-100] + total_label_ids[i][start: end] + [-100]) cur_segment_ids = self.get_segment_ids(bboxs[-1]) cur_position_ids = self.get_position_ids(cur_segment_ids) segment_ids.append(cur_segment_ids) position_ids.append(cur_position_ids) image_path.append(os.path.join(self.args.data_dir, "images", total_data['image_path'][i])) start = end cur_iter += 1 assert len(input_ids) == len(bboxs) == len(labels) == len(segment_ids) == len(position_ids) assert len(segment_ids) == len(image_path) res = { 'input_ids': input_ids, 'bbox': bboxs, 'labels': labels, 'segment_ids': segment_ids, 'position_ids': position_ids, 'image_path': image_path, } return res def __init__( self, args, tokenizer, mode ): self.args = args self.mode = mode self.cur_la = args.language self.tokenizer = tokenizer self.label2ids = XFund_label2ids self.common_transform = Compose([ RandomResizedCropAndInterpolationWithTwoPic( size=args.input_size, interpolation=args.train_interpolation, ), ]) self.patch_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=torch.tensor((0.5, 0.5, 0.5)), std=torch.tensor((0.5, 0.5, 0.5))) ]) data_file = json.load( open(os.path.join(args.data_dir, "{}.{}.json".format(self.cur_la, 'train' if mode == 'train' else 'val')), 'r')) self.feature = self.load_data(data_file) def __len__(self): return len(self.feature['input_ids']) def __getitem__(self, index): input_ids = self.feature["input_ids"][index] # attention_mask = self.feature["attention_mask"][index] attention_mask = [1] * len(input_ids) labels = self.feature["labels"][index] bbox = self.feature["bbox"][index] segment_ids = self.feature['segment_ids'][index] position_ids = self.feature['position_ids'][index] img = pil_loader(self.feature['image_path'][index]) for_patches, _ = self.common_transform(img, augmentation=False) patch = self.patch_transform(for_patches) assert len(input_ids) == len(attention_mask) == len(labels) == len(bbox) == len(segment_ids) res = { "input_ids": input_ids, "attention_mask": attention_mask, "labels": labels, "bbox": bbox, "segment_ids": segment_ids, "position_ids": position_ids, "images": patch, } return res def pil_loader(path: str) -> Image.Image: # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) with open(path, 'rb') as f: img = Image.open(f) return img.convert('RGB')
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/data/xfund.py
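A hedged sketch of instantiating the xfund_dataset defined above. The tokenizer name and the directory layout are illustrative assumptions; the actual runs use the LayoutLMv3 tokenizer and the official XFUND release placed under data_dir.

# Sketch only: the attribute names mirror the DataTrainingArguments fields that xfund_dataset reads.
from types import SimpleNamespace
from transformers import AutoTokenizer
from layoutlmft.data.xfund import xfund_dataset

args = SimpleNamespace(
    data_dir="/path/to/xfund",        # assumed to contain zh.train.json, zh.val.json and an images/ folder
    language="zh",
    input_size=224,
    train_interpolation="bicubic",
)
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base", use_fast=True)  # illustrative choice
train_set = xfund_dataset(args, tokenizer, mode="train")
sample = train_set[0]  # dict with input_ids, attention_mask, labels, bbox, segment_ids, position_ids, images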
''' Reference: https://huggingface.co/datasets/pierresi/cord/blob/main/cord.py ''' import json import os from pathlib import Path import datasets from layoutlmft.data.image_utils import load_image, normalize_bbox logger = datasets.logging.get_logger(__name__) _CITATION = """\ @article{park2019cord, title={CORD: A Consolidated Receipt Dataset for Post-OCR Parsing}, author={Park, Seunghyun and Shin, Seung and Lee, Bado and Lee, Junyeop and Surh, Jaeheung and Seo, Minjoon and Lee, Hwalsuk} booktitle={Document Intelligence Workshop at Neural Information Processing Systems} year={2019} } """ _DESCRIPTION = """\ https://github.com/clovaai/cord/ """ def quad_to_box(quad): # test 87 is wrongly annotated box = ( max(0, quad["x1"]), max(0, quad["y1"]), quad["x3"], quad["y3"] ) if box[3] < box[1]: bbox = list(box) tmp = bbox[3] bbox[3] = bbox[1] bbox[1] = tmp box = tuple(bbox) if box[2] < box[0]: bbox = list(box) tmp = bbox[2] bbox[2] = bbox[0] bbox[0] = tmp box = tuple(bbox) return box def _get_drive_url(url): base_url = 'https://drive.google.com/uc?id=' split_url = url.split('/') return base_url + split_url[5] _URLS = [ _get_drive_url("https://drive.google.com/file/d/1MqhTbcj-AHXOqYoeoh12aRUwIprzTJYI/"), _get_drive_url("https://drive.google.com/file/d/1wYdp5nC9LnHQZ2FcmOoC0eClyWvcuARU/") # If you failed to download the dataset through the automatic downloader, # you can download it manually and modify the code to get the local dataset. # Or you can use the following links. Please follow the original LICENSE of CORD for usage. # "https://layoutlm.blob.core.windows.net/cord/CORD-1k-001.zip", # "https://layoutlm.blob.core.windows.net/cord/CORD-1k-002.zip" ] class CordConfig(datasets.BuilderConfig): """BuilderConfig for CORD""" def __init__(self, **kwargs): """BuilderConfig for CORD. Args: **kwargs: keyword arguments forwarded to super. 
""" super(CordConfig, self).__init__(**kwargs) class Cord(datasets.GeneratorBasedBuilder): BUILDER_CONFIGS = [ CordConfig(name="cord", version=datasets.Version("1.0.0"), description="CORD dataset"), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "id": datasets.Value("string"), "words": datasets.Sequence(datasets.Value("string")), "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=["O","B-MENU.NM","B-MENU.NUM","B-MENU.UNITPRICE","B-MENU.CNT","B-MENU.DISCOUNTPRICE","B-MENU.PRICE","B-MENU.ITEMSUBTOTAL","B-MENU.VATYN","B-MENU.ETC","B-MENU.SUB_NM","B-MENU.SUB_UNITPRICE","B-MENU.SUB_CNT","B-MENU.SUB_PRICE","B-MENU.SUB_ETC","B-VOID_MENU.NM","B-VOID_MENU.PRICE","B-SUB_TOTAL.SUBTOTAL_PRICE","B-SUB_TOTAL.DISCOUNT_PRICE","B-SUB_TOTAL.SERVICE_PRICE","B-SUB_TOTAL.OTHERSVC_PRICE","B-SUB_TOTAL.TAX_PRICE","B-SUB_TOTAL.ETC","B-TOTAL.TOTAL_PRICE","B-TOTAL.TOTAL_ETC","B-TOTAL.CASHPRICE","B-TOTAL.CHANGEPRICE","B-TOTAL.CREDITCARDPRICE","B-TOTAL.EMONEYPRICE","B-TOTAL.MENUTYPE_CNT","B-TOTAL.MENUQTY_CNT","I-MENU.NM","I-MENU.NUM","I-MENU.UNITPRICE","I-MENU.CNT","I-MENU.DISCOUNTPRICE","I-MENU.PRICE","I-MENU.ITEMSUBTOTAL","I-MENU.VATYN","I-MENU.ETC","I-MENU.SUB_NM","I-MENU.SUB_UNITPRICE","I-MENU.SUB_CNT","I-MENU.SUB_PRICE","I-MENU.SUB_ETC","I-VOID_MENU.NM","I-VOID_MENU.PRICE","I-SUB_TOTAL.SUBTOTAL_PRICE","I-SUB_TOTAL.DISCOUNT_PRICE","I-SUB_TOTAL.SERVICE_PRICE","I-SUB_TOTAL.OTHERSVC_PRICE","I-SUB_TOTAL.TAX_PRICE","I-SUB_TOTAL.ETC","I-TOTAL.TOTAL_PRICE","I-TOTAL.TOTAL_ETC","I-TOTAL.CASHPRICE","I-TOTAL.CHANGEPRICE","I-TOTAL.CREDITCARDPRICE","I-TOTAL.EMONEYPRICE","I-TOTAL.MENUTYPE_CNT","I-TOTAL.MENUQTY_CNT"] ) ), "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"), "image_path": datasets.Value("string"), } ), supervised_keys=None, citation=_CITATION, homepage="https://github.com/clovaai/cord/", ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" """Uses local files located with data_dir""" downloaded_file = dl_manager.download_and_extract(_URLS) # move files from the second URL together with files from the first one. 
dest = Path(downloaded_file[0])/"CORD" for split in ["train", "dev", "test"]: for file_type in ["image", "json"]: if split == "test" and file_type == "json": continue files = (Path(downloaded_file[1])/"CORD"/split/file_type).iterdir() for f in files: os.rename(f, dest/split/file_type/f.name) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"filepath": dest/"train"} ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dest/"dev"} ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"filepath": dest/"test"} ), ] def get_line_bbox(self, bboxs): x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)] y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)] x0, y0, x1, y1 = min(x), min(y), max(x), max(y) assert x1 >= x0 and y1 >= y0 bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))] return bbox def _generate_examples(self, filepath): logger.info("⏳ Generating examples from = %s", filepath) ann_dir = os.path.join(filepath, "json") img_dir = os.path.join(filepath, "image") for guid, file in enumerate(sorted(os.listdir(ann_dir))): words = [] bboxes = [] ner_tags = [] file_path = os.path.join(ann_dir, file) with open(file_path, "r", encoding="utf8") as f: data = json.load(f) image_path = os.path.join(img_dir, file) image_path = image_path.replace("json", "png") image, size = load_image(image_path) for item in data["valid_line"]: cur_line_bboxes = [] line_words, label = item["words"], item["category"] line_words = [w for w in line_words if w["text"].strip() != ""] if len(line_words) == 0: continue if label == "other": for w in line_words: words.append(w["text"]) ner_tags.append("O") cur_line_bboxes.append(normalize_bbox(quad_to_box(w["quad"]), size)) else: words.append(line_words[0]["text"]) ner_tags.append("B-" + label.upper()) cur_line_bboxes.append(normalize_bbox(quad_to_box(line_words[0]["quad"]), size)) for w in line_words[1:]: words.append(w["text"]) ner_tags.append("I-" + label.upper()) cur_line_bboxes.append(normalize_bbox(quad_to_box(w["quad"]), size)) # by default: --segment_level_layout 1 # if do not want to use segment_level_layout, comment the following line cur_line_bboxes = self.get_line_bbox(cur_line_bboxes) bboxes.extend(cur_line_bboxes) # yield guid, {"id": str(guid), "words": words, "bboxes": bboxes, "ner_tags": ner_tags, "image": image} yield guid, {"id": str(guid), "words": words, "bboxes": bboxes, "ner_tags": ner_tags, "image": image, "image_path": image_path}
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/data/cord.py
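The CORD builder above is loaded by pointing load_dataset at the script file, which is exactly what run_funsd_cord.py does later in this dump. Note this sketch assumes an older datasets version that still supports dataset scripts.

# Sketch: load the CORD splits defined by the builder above.
import os
from datasets import load_dataset
import layoutlmft.data.cord

cord = load_dataset(os.path.abspath(layoutlmft.data.cord.__file__))
print(cord["train"].features["ner_tags"].feature.names[:5])   # first few BIO label names
print(cord["train"][0]["image_path"])                          # local path of the receipt image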
# flake8: noqa
from .data_collator import DataCollatorForKeyValueExtraction
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/data/__init__.py
import torch from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from transformers import BatchEncoding, PreTrainedTokenizerBase from transformers.data.data_collator import ( DataCollatorMixin, _torch_collate_batch, ) from transformers.file_utils import PaddingStrategy from typing import NewType InputDataClass = NewType("InputDataClass", Any) def pre_calc_rel_mat(segment_ids): valid_span = torch.zeros((segment_ids.shape[0], segment_ids.shape[1], segment_ids.shape[1]), device=segment_ids.device, dtype=torch.bool) for i in range(segment_ids.shape[0]): for j in range(segment_ids.shape[1]): valid_span[i, j, :] = segment_ids[i, :] == segment_ids[i, j] return valid_span @dataclass class DataCollatorForKeyValueExtraction(DataCollatorMixin): """ Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (:obj:`int`, `optional`, defaults to -100): The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None label_pad_token_id: int = -100 def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None images = None if "images" in features[0]: images = torch.stack([torch.tensor(d.pop("images")) for d in features]) IMAGE_LEN = int(images.shape[-1] / 16) * int(images.shape[-1] / 16) + 1 batch = self.tokenizer.pad( features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, # Conversion to tensors will fail if we have labels as they are not of the same length yet. 
return_tensors="pt" if labels is None else None, ) if images is not None: batch["images"] = images batch = {k: torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) and k == 'attention_mask' else v for k, v in batch.items()} visual_attention_mask = torch.ones((len(batch['input_ids']), IMAGE_LEN), dtype=torch.long) batch["attention_mask"] = torch.cat([batch['attention_mask'], visual_attention_mask], dim=1) if labels is None: return batch has_bbox_input = "bbox" in features[0] has_position_input = "position_ids" in features[0] padding_idx=self.tokenizer.pad_token_id sequence_length = torch.tensor(batch["input_ids"]).shape[1] padding_side = self.tokenizer.padding_side if padding_side == "right": batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels] if has_bbox_input: batch["bbox"] = [bbox + [[0, 0, 0, 0]] * (sequence_length - len(bbox)) for bbox in batch["bbox"]] if has_position_input: batch["position_ids"] = [position_id + [padding_idx] * (sequence_length - len(position_id)) for position_id in batch["position_ids"]] else: batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels] if has_bbox_input: batch["bbox"] = [[[0, 0, 0, 0]] * (sequence_length - len(bbox)) + bbox for bbox in batch["bbox"]] if has_position_input: batch["position_ids"] = [[padding_idx] * (sequence_length - len(position_id)) + position_id for position_id in batch["position_ids"]] if 'segment_ids' in batch: assert 'position_ids' in batch for i in range(len(batch['segment_ids'])): batch['segment_ids'][i] = batch['segment_ids'][i] + [batch['segment_ids'][i][-1] + 1] * (sequence_length - len(batch['segment_ids'][i])) + [ batch['segment_ids'][i][-1] + 2] * IMAGE_LEN batch = {k: torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) else v for k, v in batch.items()} if 'segment_ids' in batch: valid_span = pre_calc_rel_mat( segment_ids=batch['segment_ids'] ) batch['valid_span'] = valid_span del batch['segment_ids'] if images is not None: visual_labels = torch.ones((len(batch['input_ids']), IMAGE_LEN), dtype=torch.long) * -100 batch["labels"] = torch.cat([batch['labels'], visual_labels], dim=1) return batch
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/data/data_collator.py
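A hedged sketch of what DataCollatorForKeyValueExtraction does to a toy, text-only batch. With no images or segment_ids, the visual attention mask and relation-matrix branches are skipped; the roberta-base tokenizer is an illustrative stand-in for the LayoutLMv3 tokenizer.

from transformers import AutoTokenizer
from layoutlmft.data import DataCollatorForKeyValueExtraction

tokenizer = AutoTokenizer.from_pretrained("roberta-base", use_fast=True)
collator = DataCollatorForKeyValueExtraction(tokenizer, padding="longest", max_length=512)

features = [
    {"input_ids": [0, 100, 200, 2], "bbox": [[1, 2, 3, 4]] * 4, "labels": [-100, 1, 2, -100]},
    {"input_ids": [0, 300, 2],      "bbox": [[5, 6, 7, 8]] * 3, "labels": [-100, 3, -100]},
]
batch = collator(features)
print(batch["input_ids"].shape)   # torch.Size([2, 4]) -- padded to the longest sequence in the batch
print(batch["labels"][1])         # tensor([-100,    3, -100, -100]) -- label padding uses -100
print(batch["bbox"][1][-1])       # tensor([0, 0, 0, 0]) -- bbox padding uses the zero box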
# coding=utf-8 ''' Reference: https://huggingface.co/datasets/nielsr/funsd/blob/main/funsd.py ''' import json import os import datasets from layoutlmft.data.image_utils import load_image, normalize_bbox logger = datasets.logging.get_logger(__name__) _CITATION = """\ @article{Jaume2019FUNSDAD, title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents}, author={Guillaume Jaume and H. K. Ekenel and J. Thiran}, journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)}, year={2019}, volume={2}, pages={1-6} } """ _DESCRIPTION = """\ https://guillaumejaume.github.io/FUNSD/ """ class FunsdConfig(datasets.BuilderConfig): """BuilderConfig for FUNSD""" def __init__(self, **kwargs): """BuilderConfig for FUNSD. Args: **kwargs: keyword arguments forwarded to super. """ super(FunsdConfig, self).__init__(**kwargs) class Funsd(datasets.GeneratorBasedBuilder): """Conll2003 dataset.""" BUILDER_CONFIGS = [ FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "id": datasets.Value("string"), "tokens": datasets.Sequence(datasets.Value("string")), "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"] ) ), "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"), "image_path": datasets.Value("string"), } ), supervised_keys=None, homepage="https://guillaumejaume.github.io/FUNSD/", citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip") return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"} ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"} ), ] def get_line_bbox(self, bboxs): x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)] y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)] x0, y0, x1, y1 = min(x), min(y), max(x), max(y) assert x1 >= x0 and y1 >= y0 bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))] return bbox def _generate_examples(self, filepath): logger.info("⏳ Generating examples from = %s", filepath) ann_dir = os.path.join(filepath, "annotations") img_dir = os.path.join(filepath, "images") for guid, file in enumerate(sorted(os.listdir(ann_dir))): tokens = [] bboxes = [] ner_tags = [] file_path = os.path.join(ann_dir, file) with open(file_path, "r", encoding="utf8") as f: data = json.load(f) image_path = os.path.join(img_dir, file) image_path = image_path.replace("json", "png") image, size = load_image(image_path) for item in data["form"]: cur_line_bboxes = [] words, label = item["words"], item["label"] words = [w for w in words if w["text"].strip() != ""] if len(words) == 0: continue if label == "other": for w in words: tokens.append(w["text"]) ner_tags.append("O") cur_line_bboxes.append(normalize_bbox(w["box"], size)) else: tokens.append(words[0]["text"]) ner_tags.append("B-" + label.upper()) cur_line_bboxes.append(normalize_bbox(words[0]["box"], size)) for w in words[1:]: tokens.append(w["text"]) ner_tags.append("I-" + label.upper()) cur_line_bboxes.append(normalize_bbox(w["box"], size)) # by 
default: --segment_level_layout 1 # if you do not want to use segment_level_layout, comment out the following line cur_line_bboxes = self.get_line_bbox(cur_line_bboxes) # box = normalize_bbox(item["box"], size) # cur_line_bboxes = [box for _ in range(len(words))] bboxes.extend(cur_line_bboxes) yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags, "image": image, "image_path": image_path}
EXA-1-master
exa/models/unilm-master/layoutlmv3/layoutlmft/data/funsd.py
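A small worked illustration of the segment-level layout step in the FUNSD builder above: after normalization, every word in a line is assigned the line's bounding envelope, which is what get_line_bbox computes. The word boxes and page size below are made up.

from layoutlmft.data.image_utils import normalize_bbox

word_boxes = [[100, 200, 180, 220], [185, 201, 260, 221]]   # two words on one line (page coordinates)
size = (762, 1000)                                           # (width, height) of the page
norm = [normalize_bbox(b, size) for b in word_boxes]         # each coordinate rescaled to the 0-1000 range

# equivalent of get_line_bbox(norm): both tokens end up sharing the line-level envelope
xs = [c for b in norm for c in (b[0], b[2])]
ys = [c for b in norm for c in (b[1], b[3])]
line_box = [min(xs), min(ys), max(xs), max(ys)]
segment_level = [line_box for _ in norm]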
#!/usr/bin/env python # coding=utf-8 import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.data.xfund import xfund_dataset, XFund_label2ids from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.5.0") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " "with private models)." }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) language: Optional[str] = field( default='zh', metadata={"help": "The dataset in xfund to use"} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) pad_to_max_length: bool = field( default=True, metadata={ "help": "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
}, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." }, ) label_all_tokens: bool = field( default=False, metadata={ "help": "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) segment_level_layout: bool = field(default=True) visual_embed: bool = field(default=True) data_dir: Optional[str] = field(default=None) input_size: int = field(default=224, metadata={"help": "images input size for backbone"}) second_input_size: int = field(default=112, metadata={"help": "images input size for discrete vae"}) train_interpolation: str = field( default='bicubic', metadata={"help": "Training interpolation (random, bilinear, bicubic)"}) second_interpolation: str = field( default='lanczos', metadata={"help": "Interpolation for discrete vae (random, bilinear, bicubic)"}) imagenet_default_mean_and_std: bool = field(default=False, metadata={"help": ""}) def main(): # See all possible arguments in layoutlmft/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=7, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, input_size=data_args.input_size, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, tokenizer_file=None, # avoid loading from a cached file of the pre-trained model in another machine cache_dir=model_args.cache_dir, use_fast=True, add_prefix_space=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) train_dataset, eval_dataset, test_dataset = None, None, None if training_args.do_train: train_dataset = xfund_dataset(data_args, tokenizer, 'train') if training_args.do_eval: eval_dataset = xfund_dataset(data_args, tokenizer, 'eval') model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. 
Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) def get_label_list(): label_list = [[key, val] for key, val in XFund_label2ids.items()] label_list = sorted(label_list, key=lambda x:x[1], reverse=False) label_list = [label for label, id in label_list] return label_list label_list = get_label_list() # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") def _mp_fn(index): # 
For xla_spawn (TPUs) main() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/run_xfund.py
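run_xfund.py can be driven either by command-line flags or by a single JSON file, via the parse_json_file branch shown above. A hedged sketch of such a file follows; the checkpoint name, paths, and hyperparameter values are illustrative assumptions.

# Sketch: write a minimal argument file and launch the script with it.
import json

xfund_args = {
    "model_name_or_path": "microsoft/layoutlmv3-base-chinese",   # hypothetical checkpoint id
    "data_dir": "/path/to/xfund",                                # zh.train.json, zh.val.json, images/
    "language": "zh",
    "output_dir": "/tmp/layoutlmv3-xfund-zh",
    "do_train": True,
    "do_eval": True,
    "per_device_train_batch_size": 4,
    "num_train_epochs": 20,
}
with open("xfund_args.json", "w") as f:
    json.dump(xfund_args, f, indent=2)

# then, for a single-process run:  python examples/run_xfund.py xfund_args.json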
#!/usr/bin/env python # coding=utf-8 import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.5.0") logger = logging.getLogger(__name__) from layoutlmft.data.image_utils import RandomResizedCropAndInterpolationWithTwoPic, pil_loader, Compose from timm.data.constants import \ IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from torchvision import transforms import torch @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " "with private models)." }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default='funsd', metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) pad_to_max_length: bool = field( default=True, metadata={ "help": "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." 
}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." }, ) label_all_tokens: bool = field( default=False, metadata={ "help": "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) segment_level_layout: bool = field(default=True) visual_embed: bool = field(default=True) data_dir: Optional[str] = field(default=None) input_size: int = field(default=224, metadata={"help": "images input size for backbone"}) second_input_size: int = field(default=112, metadata={"help": "images input size for discrete vae"}) train_interpolation: str = field( default='bicubic', metadata={"help": "Training interpolation (random, bilinear, bicubic)"}) second_interpolation: str = field( default='lanczos', metadata={"help": "Interpolation for discrete vae (random, bilinear, bicubic)"}) imagenet_default_mean_and_std: bool = field(default=False, metadata={"help": ""}) def main(): # See all possible arguments in layoutlmft/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) if data_args.dataset_name == 'funsd': # datasets = load_dataset("nielsr/funsd") import layoutlmft.data.funsd datasets = load_dataset(os.path.abspath(layoutlmft.data.funsd.__file__), cache_dir=model_args.cache_dir) elif data_args.dataset_name == 'cord': import layoutlmft.data.cord datasets = load_dataset(os.path.abspath(layoutlmft.data.cord.__file__), cache_dir=model_args.cache_dir) else: raise NotImplementedError() if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["test"].column_names features = datasets["test"].features text_column_name = "words" if "words" in column_names else "tokens" label_column_name = ( f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1] ) remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, input_size=data_args.input_size, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, tokenizer_file=None, # avoid loading from a cached file of the pre-trained model in another machine cache_dir=model_args.cache_dir, use_fast=True, add_prefix_space=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False if data_args.visual_embed: imagenet_default_mean_and_std = data_args.imagenet_default_mean_and_std mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD common_transform = Compose([ # transforms.ColorJitter(0.4, 0.4, 0.4), # transforms.RandomHorizontalFlip(p=0.5), RandomResizedCropAndInterpolationWithTwoPic( size=data_args.input_size, interpolation=data_args.train_interpolation), ]) patch_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=torch.tensor(mean), std=torch.tensor(std)) ]) # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples, augmentation=False): tokenized_inputs = tokenizer( examples[text_column_name], padding=False, truncation=True, return_overflowing_tokens=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). is_split_into_words=True, ) labels = [] bboxes = [] images = [] for batch_index in range(len(tokenized_inputs["input_ids"])): word_ids = tokenized_inputs.word_ids(batch_index=batch_index) org_batch_index = tokenized_inputs["overflow_to_sample_mapping"][batch_index] label = examples[label_column_name][org_batch_index] bbox = examples["bboxes"][org_batch_index] previous_word_idx = None label_ids = [] bbox_inputs = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) bbox_inputs.append([0, 0, 0, 0]) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) bbox_inputs.append(bbox[word_idx]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. 
else: label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100) bbox_inputs.append(bbox[word_idx]) previous_word_idx = word_idx labels.append(label_ids) bboxes.append(bbox_inputs) if data_args.visual_embed: ipath = examples["image_path"][org_batch_index] img = pil_loader(ipath) for_patches, _ = common_transform(img, augmentation=augmentation) patch = patch_transform(for_patches) images.append(patch) tokenized_inputs["labels"] = labels tokenized_inputs["bbox"] = bboxes if data_args.visual_embed: tokenized_inputs["images"] = images return tokenized_inputs if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) train_dataset = train_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: validation_name = "test" if validation_name not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets[validation_name] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) eval_dataset = eval_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) test_dataset = test_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = 
trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/run_funsd_cord.py
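A hedged illustration of the sub-token label alignment performed by tokenize_and_align_labels above: only the first sub-token of each word keeps the word's label, and the remaining sub-tokens receive the -100 ignore index (with label_all_tokens left at its default of False). The tokenizer and example words are illustrative.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base", use_fast=True, add_prefix_space=True)
words = ["Invoice", "No.", "12345"]
word_labels = [3, 4, 5]        # e.g. B-QUESTION, I-QUESTION, B-ANSWER ids in the FUNSD label list above

enc = tokenizer(words, is_split_into_words=True)
aligned, previous_word_idx = [], None
for word_idx in enc.word_ids():
    if word_idx is None:                    # special tokens (<s>, </s>) are ignored in the loss
        aligned.append(-100)
    elif word_idx != previous_word_idx:     # first sub-token of a word keeps the label
        aligned.append(word_labels[word_idx])
    else:                                   # remaining sub-tokens of the same word are ignored
        aligned.append(-100)
    previous_word_idx = word_idx
print(list(zip(tokenizer.convert_ids_to_tokens(enc["input_ids"]), aligned)))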
import os from PIL import Image import xml.etree.ElementTree as ET import numpy as np import json from PIL import Image from shutil import copyfile def convert(ROOT, TRACK, SPLIT): coco_data = { "images": [], "annotations": [], "categories": [{"id": 1, "name": "table"}, ], } DATA_DIR = f"{ROOT}/{TRACK}/{SPLIT}" prefix = "cTDaR_t0" if TRACK == "trackA_archival" else "cTDaR_t1" print(TRACK, SPLIT, prefix) table_count = 0 for file in sorted(os.listdir(DATA_DIR)): if file.startswith(prefix) and file.endswith(".jpg"): img = Image.open(os.path.join(DATA_DIR, file)) coco_data["images"].append( { "file_name": file, "height": img.height, "width": img.width, "id": int(file[7:-4]), } ) elif file.startswith(prefix) and file.endswith(".xml"): # print(file) tree = ET.parse(os.path.join(DATA_DIR, file)) root = tree.getroot() assert len(root.findall("./table/Coords")) > 0 for table_id in range(len(root.findall("./table/Coords"))): four_points = root.findall("./table/Coords")[table_id].attrib["points"] four_points = list(map(lambda x: x.split(","), four_points.split())) four_points = [[int(j) for j in i] for i in four_points] segmentation = [j for i in four_points for j in i] bbox = [ four_points[0][0], four_points[0][1], four_points[2][0] - four_points[0][0], four_points[2][1] - four_points[0][1], ] coco_data["annotations"].append( { "segmentation": [segmentation], "area": bbox[2] * bbox[3], "iscrowd": 0, "image_id": int(file[7:-4]), "bbox": bbox, "category_id": 1, "id": table_count, } ) table_count += 1 with open(f"{ROOT}/{TRACK}/{SPLIT}.json", "w") as f: json.dump(coco_data, f) def clean_img(DATA_DIR): for file in sorted(os.listdir(DATA_DIR)): if file.endswith(".JPG"): os.rename(os.path.join(DATA_DIR, file), os.path.join(DATA_DIR, file.replace(".JPG", ".jpg"))) elif file.endswith(".TIFF"): img = Image.open(os.path.join(DATA_DIR, file)) img.save(os.path.join(DATA_DIR, file.replace(".TIFF", ".jpg"))) os.remove(os.path.join(DATA_DIR, file)) elif file.endswith(".png"): img = Image.open(os.path.join(DATA_DIR, file)) img.save(os.path.join(DATA_DIR, file.replace(".png", ".jpg"))) os.remove(os.path.join(DATA_DIR, file)) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--root_dir', required=True) parser.add_argument('--target_dir', required=True) args = parser.parse_args() test_data_dir = os.path.join(args.root_dir, 'test', 'TRACKA') test_gt_dir = os.path.join(args.root_dir, 'test_ground_truth', 'TRACKA') training_data_dir = os.path.join(args.root_dir, 'training', 'TRACKA', 'ground_truth') raw_datas = {"train": [training_data_dir], "test": [test_data_dir, test_gt_dir]} TRACKS = ["trackA_modern", "trackA_archival"] SPLITS = ["train", "test"] for track in TRACKS: prefix = "cTDaR_t0" if track == "trackA_archival" else "cTDaR_t1" for split in SPLITS: os.makedirs(os.path.join(args.target_dir, track, split)) for source_dir in raw_datas[split]: for fn in os.listdir(source_dir): if fn.startswith(prefix): ffn = os.path.join(source_dir, fn) copyfile(ffn, os.path.join(args.target_dir, track, split, fn)) clean_img(os.path.join(args.target_dir, track, split)) convert(args.target_dir, track, split)
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/convert_to_coco_format.py
#!/usr/bin/env python # -------------------------------------------------------------------------------- # MPViT: Multi-Path Vision Transformer for Dense Prediction # Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI). # All Rights Reserved. # Written by Youngwan Lee # -------------------------------------------------------------------------------- """ Detection Training Script for MPViT. """ import os import itertools import torch from typing import Any, Dict, List, Set from detectron2.data import build_detection_train_loader from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.evaluation import COCOEvaluator from detectron2.solver.build import maybe_add_gradient_clipping from ditod import add_vit_config from ditod import DetrDatasetMapper from detectron2.data.datasets import register_coco_instances import logging from detectron2.utils.logger import setup_logger from detectron2.utils import comm from detectron2.engine.defaults import create_ddp_model import weakref from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer from ditod import MyDetectionCheckpointer, ICDAREvaluator from ditod import MyTrainer def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # add_coat_config(cfg) add_vit_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() default_setup(cfg, args) return cfg def main(args): cfg = setup(args) """ register publaynet first """ register_coco_instances( "publaynet_train", {}, cfg.PUBLAYNET_DATA_DIR_TRAIN + ".json", cfg.PUBLAYNET_DATA_DIR_TRAIN ) register_coco_instances( "publaynet_val", {}, cfg.PUBLAYNET_DATA_DIR_TEST + ".json", cfg.PUBLAYNET_DATA_DIR_TEST ) register_coco_instances( "icdar2019_train", {}, cfg.ICDAR_DATA_DIR_TRAIN + ".json", cfg.ICDAR_DATA_DIR_TRAIN ) register_coco_instances( "icdar2019_test", {}, cfg.ICDAR_DATA_DIR_TEST + ".json", cfg.ICDAR_DATA_DIR_TEST ) if args.eval_only: model = MyTrainer.build_model(cfg) DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( cfg.MODEL.WEIGHTS, resume=args.resume ) res = MyTrainer.test(cfg, model) return res trainer = MyTrainer(cfg) trainer.resume_or_load(resume=args.resume) return trainer.train() if __name__ == "__main__": parser = default_argument_parser() parser.add_argument("--debug", action="store_true", help="enable debug mode") args = parser.parse_args() print("Command Line Args:", args) if args.debug: import debugpy print("Enabling attach starts.") debugpy.listen(address=('0.0.0.0', 9310)) debugpy.wait_for_client() print("Enabling attach ends.") launch( main, args.num_gpus, num_machines=args.num_machines, machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,), )
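# --- Illustrative launch commands (config path, weights, and GPU count are placeholders) ---
# Training:
#   python train_net.py --config-file <path/to/config.yaml> --num-gpus 8 \
#       MODEL.WEIGHTS <path/to/pretrained.pth> OUTPUT_DIR <output_dir>
# Evaluation only:
#   python train_net.py --config-file <path/to/config.yaml> --eval-only \
#       MODEL.WEIGHTS <path/to/trained.pth>
# The trailing KEY VALUE pairs are consumed by cfg.merge_from_list(args.opts) in setup().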
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/train_net.py
import argparse
import os

import cv2
import tqdm


def convert(fn):
    # Given an image file name, binarize it with an adaptive Gaussian threshold and
    # overwrite the file in place (saved back as a 3-channel image).
    img = cv2.imread(fn)
    gim = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gim = cv2.adaptiveThreshold(gim, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 45, 11)
    g3im = cv2.cvtColor(gim, cv2.COLOR_GRAY2BGR)
    cv2.imwrite(fn, g3im)


if __name__ == '__main__':
    """
    Currently only intended for the trackA_* datasets.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', default="../datasets/icdar2019/at_trackA_archival")
    args = parser.parse_args()

    for fdname in os.listdir(args.root_dir):
        if fdname.endswith(".json"):
            continue
        ffdname = os.path.join(args.root_dir, fdname)
        for file in tqdm.tqdm(os.listdir(ffdname)):
            if file.endswith(".xml"):
                continue
            ffile = os.path.join(ffdname, file)
            convert(ffile)
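# --- Illustrative sketch (not from the original repo): the same adaptive binarization
# applied to a synthetic image, so the threshold parameters can be inspected without
# the dataset directories handled above. ---
import cv2
import numpy as np

img = np.full((128, 128, 3), 200, dtype=np.uint8)                 # light gray "page"
cv2.putText(img, "A", (30, 90), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), 3)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 45, 11)
print(np.unique(binary))                                          # expected: [  0 255]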
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/adaptive_binarize.py
""" Mostly copy-paste from DINO and timm library: https://github.com/facebookresearch/dino https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py """ import warnings import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.models.layers import trunc_normal_, drop_path, to_2tuple from functools import partial def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape q, k, v = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath( drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.window_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches_w, self.num_patches_h = self.window_size self.num_patches = self.window_size[0] * self.window_size[1] self.img_size = img_size self.patch_size = patch_size self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = self.proj(x) return x class HybridEmbed(nn.Module): """ CNN Feature Map Embedding Extract feature map from CNN, flatten, project to embedding dim. """ def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768): super().__init__() assert isinstance(backbone, nn.Module) img_size = to_2tuple(img_size) self.img_size = img_size self.backbone = backbone if feature_size is None: with torch.no_grad(): # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature # map for all networks, the feature metadata has reliable channel and stride info, but using # stride to calc feature dim requires info about padding of each stage that isn't captured. training = backbone.training if training: backbone.eval() o = self.backbone(torch.zeros( 1, in_chans, img_size[0], img_size[1]))[-1] feature_size = o.shape[-2:] feature_dim = o.shape[1] backbone.train(training) else: feature_size = to_2tuple(feature_size) feature_dim = self.backbone.feature_info.channels()[-1] self.num_patches = feature_size[0] * feature_size[1] self.proj = nn.Linear(feature_dim, embed_dim) def forward(self, x): x = self.backbone(x)[-1] x = x.flatten(2).transpose(1, 2) x = self.proj(x) return x class ViT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, model_name='vit_base_patch16_224', img_size=384, patch_size=16, in_chans=3, embed_dim=1024, depth=24, num_heads=16, num_classes=19, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0.1, attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=partial(nn.LayerNorm, eps=1e-6), norm_cfg=None, pos_embed_interp=False, random_init=False, align_corners=False, use_checkpoint=False, num_extra_tokens=1, out_features=None, **kwargs, ): super(ViT, self).__init__() self.model_name = model_name self.img_size = img_size self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.depth = depth self.num_heads = num_heads self.num_classes = num_classes self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.qk_scale = qk_scale self.drop_rate = drop_rate self.attn_drop_rate = attn_drop_rate self.drop_path_rate = drop_path_rate self.hybrid_backbone = hybrid_backbone self.norm_layer = norm_layer self.norm_cfg = norm_cfg self.pos_embed_interp = pos_embed_interp self.random_init = random_init self.align_corners = align_corners self.use_checkpoint = use_checkpoint self.num_extra_tokens = num_extra_tokens self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] # self.num_stages = self.depth # self.out_indices = 
tuple(range(self.num_stages)) if self.hybrid_backbone is not None: self.patch_embed = HybridEmbed( self.hybrid_backbone, img_size=self.img_size, in_chans=self.in_chans, embed_dim=self.embed_dim) else: self.patch_embed = PatchEmbed( img_size=self.img_size, patch_size=self.patch_size, in_chans=self.in_chans, embed_dim=self.embed_dim) self.num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) if self.num_extra_tokens == 2: self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.pos_embed = nn.Parameter(torch.zeros( 1, self.num_patches + self.num_extra_tokens, self.embed_dim)) self.pos_drop = nn.Dropout(p=self.drop_rate) # self.num_extra_tokens = self.pos_embed.shape[-2] - self.num_patches dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=self.embed_dim, num_heads=self.num_heads, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, qk_scale=self.qk_scale, drop=self.drop_rate, attn_drop=self.attn_drop_rate, drop_path=dpr[i], norm_layer=self.norm_layer) for i in range(self.depth)]) # NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here # self.repr = nn.Linear(embed_dim, representation_size) # self.repr_act = nn.Tanh() if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), nn.SyncBatchNorm(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) if self.num_extra_tokens==2: trunc_normal_(self.dist_token, std=0.2) self.apply(self._init_weights) # self.fix_init_weight() def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): logger = get_root_logger() trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger) ''' def get_num_layers(self): return len(self.blocks) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} def _conv_filter(self, state_dict, patch_size=16): """ convert patch embedding weight from manual 
patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: v = v.reshape((v.shape[0], 3, patch_size, patch_size)) out_dict[k] = v return out_dict def to_2D(self, x): n, hw, c = x.shape h = w = int(math.sqrt(hw)) x = x.transpose(1, 2).reshape(n, c, h, w) return x def to_1D(self, x): n, c, h, w = x.shape x = x.reshape(n, c, -1).transpose(1, 2) return x def interpolate_pos_encoding(self, x, w, h): npatch = x.shape[1] - self.num_extra_tokens N = self.pos_embed.shape[1] - self.num_extra_tokens if npatch == N and w == h: return self.pos_embed class_ORdist_pos_embed = self.pos_embed[:, 0:self.num_extra_tokens] patch_pos_embed = self.pos_embed[:, self.num_extra_tokens:] dim = x.shape[-1] w0 = w // self.patch_embed.patch_size[0] h0 = h // self.patch_embed.patch_size[1] # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 w0, h0 = w0 + 0.1, h0 + 0.1 patch_pos_embed = nn.functional.interpolate( patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), mode='bicubic', ) assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_ORdist_pos_embed, patch_pos_embed), dim=1) def prepare_tokens(self, x, mask=None): B, nc, w, h = x.shape # patch linear embedding x = self.patch_embed(x) # mask image modeling if mask is not None: x = self.mask_model(x, mask) x = x.flatten(2).transpose(1, 2) # add the [CLS] token to the embed patch tokens all_tokens = [self.cls_token.expand(B, -1, -1)] if self.num_extra_tokens == 2: dist_tokens = self.dist_token.expand(B, -1, -1) all_tokens.append(dist_tokens) all_tokens.append(x) x = torch.cat(all_tokens, dim=1) # add positional encoding to each token x = x + self.interpolate_pos_encoding(x, w, h) return self.pos_drop(x) def forward_features(self, x): # print(f"==========shape of x is {x.shape}==========") B, _, H, W = x.shape Hp, Wp = H // self.patch_size, W // self.patch_size x = self.prepare_tokens(x) features = [] for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x) else: x = blk(x) if i in self.out_indices: xp = x[:, self.num_extra_tokens:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def deit_base_patch16(pretrained=False, **kwargs): model = ViT( patch_size=16, drop_rate=0., embed_dim=768, depth=12, num_heads=12, num_classes=1000, mlp_ratio=4., qkv_bias=True, use_checkpoint=True, num_extra_tokens=2, **kwargs) model.default_cfg = _cfg() return model def mae_base_patch16(pretrained=False, **kwargs): model = ViT( patch_size=16, drop_rate=0., embed_dim=768, depth=12, num_heads=12, num_classes=1000, mlp_ratio=4., qkv_bias=True, use_checkpoint=True, num_extra_tokens=1, **kwargs) model.default_cfg = _cfg() return model
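# --- Hypothetical usage of the backbones defined above (a sketch; the image size and
# feature names are assumptions chosen to match the "layerN" convention expected by
# out_indices, not values copied from a released config). ---
if __name__ == "__main__":
    model = deit_base_patch16(img_size=224,
                              out_features=["layer3", "layer5", "layer7", "layer11"])
    n_params = sum(p.numel() for p in model.parameters())
    print(f"deit_base_patch16 backbone: {n_params / 1e6:.1f}M parameters")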
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/deit.py
import copy import itertools import os import os.path as osp import shutil from collections import OrderedDict from xml.dom.minidom import Document import detectron2.utils.comm as comm import torch from detectron2.evaluation import COCOEvaluator from detectron2.utils.file_io import PathManager from .table_evaluation.evaluate import calc_table_score class ICDAREvaluator(COCOEvaluator): def evaluate(self, img_ids=None): """ Args: img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset """ if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() if "proposals" in predictions[0]: self._eval_box_proposals(predictions) if "instances" in predictions[0]: self._eval_predictions(predictions, img_ids=img_ids) self.evaluate_table(predictions) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def evaluate_table(self, predictions): xml_dir = self.convert_to_xml(predictions) results = calc_table_score(xml_dir) self._results["wF1"] = results['wF1'] def convert_to_xml(self, predictions): output_dir = osp.join(self._output_dir, "xml_results") if os.path.exists(output_dir): shutil.rmtree(output_dir) os.makedirs(output_dir, exist_ok=True) coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) results_dict = {} for result in coco_results: if result["score"] < 0.7: continue image_id = result["image_id"] if image_id not in results_dict: results_dict[image_id] = [] results_dict[image_id].append(result) for image_id, tables in results_dict.items(): file_name = f"cTDaR_t{image_id:05d}.jpg" doc = Document() root = doc.createElement('document') root.setAttribute('filename', file_name) doc.appendChild(root) for table_id, table in enumerate(tables, start=1): nodeManager = doc.createElement('table') nodeManager.setAttribute('id', str(table_id)) bbox = list(map(int, table['bbox'])) bbox_str = '{},{} {},{} {},{} {},{}'.format(bbox[0], bbox[1], bbox[0], bbox[1] + bbox[3], bbox[0] + bbox[2], bbox[1] + bbox[3], bbox[0] + bbox[2], bbox[1]) nodeCoords = doc.createElement('Coords') nodeCoords.setAttribute('points', bbox_str) nodeManager.appendChild(nodeCoords) root.appendChild(nodeManager) filename = '{}-result.xml'.format(file_name[:-4]) fp = open(os.path.join(output_dir, filename), 'w') doc.writexml(fp, indent='', addindent='\t', newl='\n', encoding="utf-8") fp.flush() fp.close() return output_dir if __name__ == '__main__': pass
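# --- Minimal sketch (illustration only) of the per-image XML produced by convert_to_xml
# above, for one made-up table detection; the corner order is top-left, bottom-left,
# bottom-right, top-right, matching the format string used in the evaluator. ---
from xml.dom.minidom import Document

doc = Document()
root = doc.createElement('document')
root.setAttribute('filename', 'cTDaR_t00001.jpg')
doc.appendChild(root)
table = doc.createElement('table')
table.setAttribute('id', '1')
coords = doc.createElement('Coords')
coords.setAttribute('points', '10,20 10,120 210,120 210,20')   # bbox = [10, 20, 200, 100]
table.appendChild(coords)
root.appendChild(table)
print(doc.toprettyxml(indent='\t'))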
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/icdar_evaluation.py
""" Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 The official jax code is released and available at https://github.com/google-research/vision_transformer Status/TODO: * Models updated to be compatible with official impl. Args added to support backward compat for old PyTorch weights. * Weights ported from official jax impl for 384x384 base and small models, 16x16 and 32x32 patches. * Trained (supervised on ImageNet-1k) my custom 'small' patch model to 77.9, 'base' to 79.4 top-1 with this code. * Hopefully find time and GPUs for SSL or unsupervised pretraining on OpenImages w/ ImageNet fine-tune in future. Acknowledgments: * The paper authors for releasing code and weights, thanks! * I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out for some einops/einsum fun * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2020 Ross Wightman """ import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) # x = self.drop(x) # commit this for the orignal BERT implement x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., window_size=None, attn_head_dim=None): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = None self.v_bias = None if window_size: self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = \ torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) # trunc_normal_(self.relative_position_bias_table, std=.0) else: self.window_size = None self.relative_position_bias_table = None self.relative_position_index = None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, rel_pos_bias=None, training_window_size=None): B, N, C = x.shape qkv_bias = None if self.q_bias is not None: qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = 
qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose(-2, -1)) if self.relative_position_bias_table is not None: if training_window_size == self.window_size: relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) else: training_window_size = tuple(training_window_size.tolist()) new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3 # new_num_relative_dis 为 所有可能的相对位置选项,包含cls-cls,tok-cls,与cls-tok new_relative_position_bias_table = F.interpolate( self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads, 2 * self.window_size[0] - 1, 2 * self.window_size[1] - 1), size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic', align_corners=False) new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads, new_num_relative_distance - 3).permute( 1, 0) new_relative_position_bias_table = torch.cat( [new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(training_window_size[0]) coords_w = torch.arange(training_window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += training_window_size[1] - 1 relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1 relative_position_index = \ torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = new_num_relative_distance - 3 relative_position_index[0:, 0] = new_num_relative_distance - 2 relative_position_index[0, 0] = new_num_relative_distance - 1 relative_position_bias = \ new_relative_position_bias_table[relative_position_index.view(-1)].view( training_window_size[0] * training_window_size[1] + 1, training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if rel_pos_bias is not None: attn = attn + rel_pos_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, window_size=None, attn_head_dim=None): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, window_size=window_size, 
attn_head_dim=attn_head_dim) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) if init_values is not None: self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) else: self.gamma_1, self.gamma_2 = None, None def forward(self, x, rel_pos_bias=None, training_window_size=None): if self.gamma_1 is None: x = x + self.drop_path( self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, training_window_size=training_window_size)) x = x + self.drop_path(self.mlp(self.norm2(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, training_window_size=training_window_size)) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches_w = self.patch_shape[0] self.num_patches_h = self.patch_shape[1] # the so-called patch_shape is the patch shape during pre-training self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x, position_embedding=None, **kwargs): # FIXME look at relaxing size constraints # assert H == self.img_size[0] and W == self.img_size[1], \ # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x) Hp, Wp = x.shape[2], x.shape[3] if position_embedding is not None: # interpolate the position embedding to the corresponding size position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3, 1, 2) position_embedding = F.interpolate(position_embedding, size=(Hp, Wp), mode='bicubic') x = x + position_embedding x = x.flatten(2).transpose(1, 2) return x, (Hp, Wp) class HybridEmbed(nn.Module): """ CNN Feature Map Embedding Extract feature map from CNN, flatten, project to embedding dim. """ def __init__(self, backbone, img_size=[224, 224], feature_size=None, in_chans=3, embed_dim=768): super().__init__() assert isinstance(backbone, nn.Module) img_size = to_2tuple(img_size) self.img_size = img_size self.backbone = backbone if feature_size is None: with torch.no_grad(): # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature # map for all networks, the feature metadata has reliable channel and stride info, but using # stride to calc feature dim requires info about padding of each stage that isn't captured. 
training = backbone.training if training: backbone.eval() o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1] feature_size = o.shape[-2:] feature_dim = o.shape[1] backbone.train(training) else: feature_size = to_2tuple(feature_size) feature_dim = self.backbone.feature_info.channels()[-1] self.num_patches = feature_size[0] * feature_size[1] self.proj = nn.Linear(feature_dim, embed_dim) def forward(self, x): x = self.backbone(x)[-1] x = x.flatten(2).transpose(1, 2) x = self.proj(x) return x class RelativePositionBias(nn.Module): def __init__(self, window_size, num_heads): super().__init__() self.window_size = window_size self.num_heads = num_heads self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = \ torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) # trunc_normal_(self.relative_position_bias_table, std=.02) def forward(self, training_window_size): if training_window_size == self.window_size: relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww else: training_window_size = tuple(training_window_size.tolist()) new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3 # new_num_relative_dis 为 所有可能的相对位置选项,包含cls-cls,tok-cls,与cls-tok new_relative_position_bias_table = F.interpolate( self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads, 2 * self.window_size[0] - 1, 2 * self.window_size[1] - 1), size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic', align_corners=False) new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads, new_num_relative_distance - 3).permute( 1, 0) new_relative_position_bias_table = torch.cat( [new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(training_window_size[0]) coords_w = torch.arange(training_window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, 
Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += training_window_size[1] - 1 relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1 relative_position_index = \ torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = new_num_relative_distance - 3 relative_position_index[0:, 0] = new_num_relative_distance - 2 relative_position_index[0, 0] = new_num_relative_distance - 1 relative_position_bias = \ new_relative_position_bias_table[relative_position_index.view(-1)].view( training_window_size[0] * training_window_size[1] + 1, training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww return relative_position_bias class BEiT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): super(BEiT, self).__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.use_checkpoint = use_checkpoint if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) self.use_shared_rel_pos_bias = use_shared_rel_pos_bias if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) # trunc_normal_(self.mask_token, std=.02) if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), # nn.SyncBatchNorm(embed_dim), nn.BatchNorm2d(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, 
stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ logger = get_root_logger() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger, beit_spec_expand_rel_pos = self.use_rel_pos_bias, ) ''' def get_num_layers(self): return len(self.blocks) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} def forward_features(self, x): B, C, H, W = x.shape x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None) # Hp, Wp are HW for patches batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.pos_embed is not None: cls_tokens = cls_tokens + self.pos_embed[:, :1, :] x = torch.cat((cls_tokens, x), dim=1) x = self.pos_drop(x) features = [] training_window_size = torch.tensor([Hp, Wp]) rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size) else: x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size) if i in self.out_indices: xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def beit_base_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, **kwargs) 
model.default_cfg = _cfg() return model def beit_large_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, **kwargs) model.default_cfg = _cfg() return model def dit_base_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=0.1, **kwargs) model.default_cfg = _cfg() return model def dit_large_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=1e-5, **kwargs) model.default_cfg = _cfg() return model if __name__ == '__main__': model = BEiT(use_checkpoint=True, use_shared_rel_pos_bias=True) model = model.to("cuda:0") input1 = torch.rand(2, 3, 512, 762).to("cuda:0") input2 = torch.rand(2, 3, 800, 1200).to("cuda:0") input3 = torch.rand(2, 3, 720, 1000).to("cuda:0") output1 = model(input1) output2 = model(input2) output3 = model(input3) print("all done")
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/beit.py
from detectron2.config import CfgNode as CN


def add_vit_config(cfg):
    """
    Add config for ViT.
    """
    _C = cfg

    _C.MODEL.VIT = CN()

    # ViT backbone model name.
    _C.MODEL.VIT.NAME = ""

    # Output features from the ViT backbone.
    _C.MODEL.VIT.OUT_FEATURES = ["layer3", "layer5", "layer7", "layer11"]

    _C.MODEL.VIT.IMG_SIZE = [224, 224]

    _C.MODEL.VIT.POS_TYPE = "shared_rel"

    _C.MODEL.VIT.DROP_PATH = 0.

    _C.MODEL.VIT.MODEL_KWARGS = "{}"

    _C.SOLVER.OPTIMIZER = "ADAMW"

    _C.SOLVER.BACKBONE_MULTIPLIER = 1.0

    _C.AUG = CN()

    _C.AUG.DETR = False

    _C.MODEL.IMAGE_ONLY = True
    _C.PUBLAYNET_DATA_DIR_TRAIN = ""
    _C.PUBLAYNET_DATA_DIR_TEST = ""
    _C.ICDAR_DATA_DIR_TRAIN = ""
    _C.ICDAR_DATA_DIR_TEST = ""
    _C.CACHE_DIR = ""
    _C.MODEL.CONFIG_PATH = ""

    # The effective number of update steps is MAX_ITER / GRADIENT_ACCUMULATION_STEPS,
    # so MAX_ITER may need to be multiplied by GRADIENT_ACCUMULATION_STEPS.
    _C.SOLVER.GRADIENT_ACCUMULATION_STEPS = 1
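# --- Hypothetical usage of add_vit_config (a sketch; the overridden values below are
# placeholders, not settings taken from a released config file). ---
if __name__ == "__main__":
    from detectron2.config import get_cfg

    cfg = get_cfg()
    add_vit_config(cfg)
    cfg.MODEL.VIT.NAME = "dit_base_patch16"
    cfg.ICDAR_DATA_DIR_TRAIN = "datasets/icdar2019/trackA_modern/train"
    print(cfg.MODEL.VIT)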
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/config.py
from detectron2.checkpoint import DetectionCheckpointer from typing import Any import torch import torch.nn as nn from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION, quantization, \ ObserverBase, FakeQuantizeBase from torch import distributed as dist from scipy import interpolate import numpy as np import torch.nn.functional as F from collections import OrderedDict def append_prefix(k): prefix = 'backbone.bottom_up.backbone.' return prefix + k if not k.startswith(prefix) else k def modify_ckpt_state(model, state_dict, logger=None): # reshape absolute position embedding for Swin if state_dict.get(append_prefix('absolute_pos_embed')) is not None: absolute_pos_embed = state_dict[append_prefix('absolute_pos_embed')] N1, L, C1 = absolute_pos_embed.size() N2, C2, H, W = model.backbone.bottom_up.backbone.absolute_pos_embed.size() if N1 != N2 or C1 != C2 or L != H * W: logger.warning("Error in loading absolute_pos_embed, pass") else: state_dict[append_prefix('absolute_pos_embed')] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) def get_dist_info(): if dist.is_available() and dist.is_initialized(): rank = dist.get_rank() world_size = dist.get_world_size() else: rank = 0 world_size = 1 return rank, world_size def resize_position_embeddings(max_position_embeddings, old_vocab_size, _k='backbone.bottom_up.backbone.embeddings.position_embeddings.weight', initializer_range=0.02, reuse_position_embedding=True): ''' Reference: unilm ALso see discussions: https://github.com/pytorch/fairseq/issues/1685 https://github.com/google-research/bert/issues/27 ''' new_position_embedding = state_dict[_k].data.new_tensor(torch.ones( size=(max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float) new_position_embedding = nn.Parameter(data=new_position_embedding, requires_grad=True) new_position_embedding.data.normal_(mean=0.0, std=initializer_range) if max_position_embeddings > old_vocab_size: logger.info("Resize > position embeddings !") max_range = max_position_embeddings if reuse_position_embedding else old_vocab_size shift = 0 while shift < max_range: delta = min(old_vocab_size, max_range - shift) new_position_embedding.data[shift: shift + delta, :] = state_dict[_k][:delta, :] logger.info(" CP [%d ~ %d] into [%d ~ %d] " % (0, delta, shift, shift + delta)) shift += delta state_dict[_k] = new_position_embedding.data del new_position_embedding elif max_position_embeddings < old_vocab_size: logger.info("Resize < position embeddings !") new_position_embedding.data.copy_(state_dict[_k][:max_position_embeddings, :]) state_dict[_k] = new_position_embedding.data del new_position_embedding rank, _ = get_dist_info() all_keys = list(state_dict.keys()) for key in all_keys: if "embeddings.position_embeddings.weight" in key: if key not in model.state_dict(): # image only models do not use this key continue max_position_embeddings = model.state_dict()[key].shape[0] old_vocab_size = state_dict[key].shape[0] if max_position_embeddings != old_vocab_size: resize_position_embeddings(max_position_embeddings, old_vocab_size,_k=key) if "relative_position_index" in key: state_dict.pop(key) if "relative_position_bias_table" in key: rel_pos_bias = state_dict[key] src_num_pos, num_attn_heads = rel_pos_bias.size() if key not in model.state_dict(): continue dst_num_pos, _ = model.state_dict()[key].size() dst_patch_shape = model.backbone.bottom_up.backbone.patch_embed.patch_shape if dst_patch_shape[0] != dst_patch_shape[1]: raise NotImplementedError() num_extra_tokens = 
dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1) src_size = int((src_num_pos - num_extra_tokens) ** 0.5) dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5) if src_size != dst_size: if rank == 0: print("Position interpolate for %s from %dx%d to %dx%d" % ( key, src_size, src_size, dst_size, dst_size)) extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] def geometric_progression(a, r, n): return a * (1.0 - r ** n) / (1.0 - r) left, right = 1.01, 1.5 while right - left > 1e-6: q = (left + right) / 2.0 gp = geometric_progression(1, q, src_size // 2) if gp > dst_size // 2: right = q else: left = q # if q > 1.13492: # q = 1.13492 dis = [] cur = 1 for i in range(src_size // 2): dis.append(cur) cur += q ** (i + 1) r_ids = [-_ for _ in reversed(dis)] x = r_ids + [0] + dis y = r_ids + [0] + dis t = dst_size // 2.0 dx = np.arange(-t, t + 0.1, 1.0) dy = np.arange(-t, t + 0.1, 1.0) if rank == 0: print("x = {}".format(x)) print("dx = {}".format(dx)) all_rel_pos_bias = [] for i in range(num_attn_heads): z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy() f = interpolate.interp2d(x, y, z, kind='cubic') all_rel_pos_bias.append( torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device)) rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) state_dict[key] = new_rel_pos_bias if append_prefix('pos_embed') in state_dict: pos_embed_checkpoint = state_dict[append_prefix('pos_embed')] embedding_size = pos_embed_checkpoint.shape[-1] num_patches = model.backbone.bottom_up.backbone.patch_embed.num_patches num_extra_tokens = model.backbone.bottom_up.backbone.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding # new_size = int(num_patches ** 0.5) new_size_w = model.backbone.bottom_up.backbone.patch_embed.num_patches_w new_size_h = model.backbone.bottom_up.backbone.patch_embed.num_patches_h # class_token and dist_token are kept unchanged if orig_size != new_size_h or orig_size != new_size_w: if rank == 0: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size_w, new_size_h)) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size_w, new_size_h), mode='bicubic', align_corners=False) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) state_dict[append_prefix('pos_embed')] = new_pos_embed # interpolate position bias table if needed relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] for table_key in relative_position_bias_table_keys: table_pretrained = state_dict[table_key] if table_key not in model.state_dict(): continue table_current = model.state_dict()[table_key] L1, nH1 = table_pretrained.size() L2, nH2 = table_current.size() if nH1 != nH2: logger.warning(f"Error in loading {table_key}, pass") else: if L1 != L2: S1 = int(L1 ** 0.5) S2 = int(L2 ** 0.5) table_pretrained_resized = F.interpolate( table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') 
state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) if append_prefix('rel_pos_bias.relative_position_bias_table') in state_dict and \ model.backbone.bottom_up.backbone.use_rel_pos_bias and \ not model.backbone.bottom_up.backbone.use_shared_rel_pos_bias and \ append_prefix('blocks.0.attn.relative_position_bias_table') not in state_dict: logger.info("[BEIT] Expand the shared relative position embedding to each transformer block. ") num_layers = model.backbone.bottom_up.backbone.get_num_layers() rel_pos_bias = state_dict[append_prefix("rel_pos_bias.relative_position_bias_table")] for i in range(num_layers): state_dict["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone() state_dict.pop(append_prefix("rel_pos_bias.relative_position_bias_table")) return state_dict class MyDetectionCheckpointer(DetectionCheckpointer): def _load_model(self, checkpoint: Any) -> _IncompatibleKeys: """ Load weights from a checkpoint. Args: checkpoint (Any): checkpoint contains the weights. Returns: ``NamedTuple`` with ``missing_keys``, ``unexpected_keys``, and ``incorrect_shapes`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys * **incorrect_shapes** is a list of (key, shape in checkpoint, shape in model) This is just like the return value of :func:`torch.nn.Module.load_state_dict`, but with extra support for ``incorrect_shapes``. """ checkpoint_state_dict = checkpoint.pop("model") checkpoint_state_dict = self.rename_state_dict(checkpoint_state_dict) self._convert_ndarray_to_tensor(checkpoint_state_dict) # if the state_dict comes from a model that was wrapped in a # DataParallel or DistributedDataParallel during serialization, # remove the "module" prefix before performing the matching. _strip_prefix_if_present(checkpoint_state_dict, "module.") # workaround https://github.com/pytorch/pytorch/issues/24139 model_state_dict = self.model.state_dict() incorrect_shapes = [] # rename the para in checkpoint_state_dict # some bug here, do not support re load if 'backbone.fpn_lateral2.weight' not in checkpoint_state_dict.keys(): checkpoint_state_dict = { append_prefix(k): checkpoint_state_dict[k] for k in checkpoint_state_dict.keys() } # else: resume a model, do not need append_prefix checkpoint_state_dict = modify_ckpt_state(self.model, checkpoint_state_dict, logger=self.logger) for k in list(checkpoint_state_dict.keys()): if k in model_state_dict: model_param = model_state_dict[k] # Allow mismatch for uninitialized parameters if TORCH_VERSION >= (1, 8) and isinstance( model_param, nn.parameter.UninitializedParameter ): continue shape_model = tuple(model_param.shape) shape_checkpoint = tuple(checkpoint_state_dict[k].shape) if shape_model != shape_checkpoint: has_observer_base_classes = ( TORCH_VERSION >= (1, 8) and hasattr(quantization, "ObserverBase") and hasattr(quantization, "FakeQuantizeBase") ) if has_observer_base_classes: # Handle the special case of quantization per channel observers, # where buffer shape mismatches are expected. 
def _get_module_for_key( model: torch.nn.Module, key: str ) -> torch.nn.Module: # foo.bar.param_or_buffer_name -> [foo, bar] key_parts = key.split(".")[:-1] cur_module = model for key_part in key_parts: cur_module = getattr(cur_module, key_part) return cur_module cls_to_skip = ( ObserverBase, FakeQuantizeBase, ) target_module = _get_module_for_key(self.model, k) if isinstance(target_module, cls_to_skip): # Do not remove modules with expected shape mismatches # them from the state_dict loading. They have special logic # in _load_from_state_dict to handle the mismatches. continue incorrect_shapes.append((k, shape_checkpoint, shape_model)) checkpoint_state_dict.pop(k) incompatible = self.model.load_state_dict(checkpoint_state_dict, strict=False) return _IncompatibleKeys( missing_keys=incompatible.missing_keys, unexpected_keys=incompatible.unexpected_keys, incorrect_shapes=incorrect_shapes, ) def rename_state_dict(self, state_dict): new_state_dict = OrderedDict() layoutlm = False for k, v in state_dict.items(): if 'layoutlmv3' in k: layoutlm = True new_state_dict[k.replace('layoutlmv3.', '')] = v if layoutlm: return new_state_dict return state_dict
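# --- Tiny illustration (not from the original repo) of append_prefix: raw backbone keys
# gain the detectron2 wrapper prefix, while already-prefixed keys pass through unchanged. ---
if __name__ == "__main__":
    print(append_prefix("pos_embed"))
    # backbone.bottom_up.backbone.pos_embed
    print(append_prefix("backbone.bottom_up.backbone.pos_embed"))
    # backbone.bottom_up.backbone.pos_embed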
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/mycheckpointer.py
# Copyright (c) Facebook, Inc. and its affiliates. import logging import numpy as np from typing import Dict, List, Optional, Tuple import torch from torch import nn from detectron2.config import configurable from detectron2.structures import ImageList, Instances from detectron2.utils.events import get_event_storage from detectron2.modeling.backbone import Backbone, build_backbone from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY from detectron2.modeling.meta_arch import GeneralizedRCNN from detectron2.modeling.postprocessing import detector_postprocess from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference_single_image from contextlib import contextmanager from itertools import count @META_ARCH_REGISTRY.register() class VLGeneralizedRCNN(GeneralizedRCNN): """ Generalized R-CNN. Any models that contains the following three components: 1. Per-image feature extraction (aka backbone) 2. Region proposal generation 3. Per-region feature extraction and prediction """ def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper` . Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * image: Tensor, image in (C, H, W) format. * instances (optional): groundtruth :class:`Instances` * proposals (optional): :class:`Instances`, precomputed proposals. Other information that's included in the original dicts, such as: * "height", "width" (int): the output resolution of the model, used in inference. See :meth:`postprocess` for details. Returns: list[dict]: Each dict is the output for one input image. The dict contains one key "instances" whose value is a :class:`Instances`. The :class:`Instances` object has the following keys: "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints" """ if not self.training: return self.inference(batched_inputs) images = self.preprocess_image(batched_inputs) if "instances" in batched_inputs[0]: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] else: gt_instances = None # features = self.backbone(images.tensor) input = self.get_batch(batched_inputs, images) features = self.backbone(input) if self.proposal_generator is not None: proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) else: assert "proposals" in batched_inputs[0] proposals = [x["proposals"].to(self.device) for x in batched_inputs] proposal_losses = {} _, detector_losses = self.roi_heads(images, features, proposals, gt_instances) if self.vis_period > 0: storage = get_event_storage() if storage.iter % self.vis_period == 0: self.visualize_training(batched_inputs, proposals) losses = {} losses.update(detector_losses) losses.update(proposal_losses) return losses def inference( self, batched_inputs: List[Dict[str, torch.Tensor]], detected_instances: Optional[List[Instances]] = None, do_postprocess: bool = True, ): """ Run inference on the given inputs. Args: batched_inputs (list[dict]): same as in :meth:`forward` detected_instances (None or list[Instances]): if not None, it contains an `Instances` object per image. The `Instances` object contains "pred_boxes" and "pred_classes" which are known boxes in the image. The inference will then skip the detection of bounding boxes, and only predict other per-ROI outputs. do_postprocess (bool): whether to apply post-processing on the outputs. Returns: When do_postprocess=True, same as in :meth:`forward`. 
Otherwise, a list[Instances] containing raw network outputs. """ assert not self.training images = self.preprocess_image(batched_inputs) # features = self.backbone(images.tensor) input = self.get_batch(batched_inputs, images) features = self.backbone(input) if detected_instances is None: if self.proposal_generator is not None: proposals, _ = self.proposal_generator(images, features, None) else: assert "proposals" in batched_inputs[0] proposals = [x["proposals"].to(self.device) for x in batched_inputs] results, _ = self.roi_heads(images, features, proposals, None) else: detected_instances = [x.to(self.device) for x in detected_instances] results = self.roi_heads.forward_with_given_boxes(features, detected_instances) if do_postprocess: assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess." return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes) else: return results def get_batch(self, examples, images): if len(examples) >= 1 and "bbox" not in examples[0]: # image_only return {"images": images.tensor} return input def _batch_inference(self, batched_inputs, detected_instances=None): """ Execute inference on a list of inputs, using batch size = self.batch_size (e.g., 2), instead of the length of the list. Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference` """ if detected_instances is None: detected_instances = [None] * len(batched_inputs) outputs = [] inputs, instances = [], [] for idx, input, instance in zip(count(), batched_inputs, detected_instances): inputs.append(input) instances.append(instance) if len(inputs) == 2 or idx == len(batched_inputs) - 1: outputs.extend( self.inference( inputs, instances if instances[0] is not None else None, do_postprocess=True, # False ) ) inputs, instances = [], [] return outputs
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/rcnn_vl.py
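VLGeneralizedRCNN only changes how the backbone input is assembled: get_batch() hands the backbone an image-only dict when no "bbox" field is present (note that, as excerpted above, the other branch returns the bare name `input` rather than building a multimodal dict). A minimal inference sketch, assuming `cfg` is a fully populated detectron2 config that selects this meta-architecture:

# Minimal inference sketch; `cfg` is assumed to be a complete config (e.g. built with
# add_vit_config + merge_from_file) whose MODEL.META_ARCHITECTURE is "VLGeneralizedRCNN".
import torch
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer

model = build_model(cfg)                              # instantiates the class registered above
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)  # load fine-tuned weights
model.eval()

with torch.no_grad():
    # image-only input: no "bbox" key, so get_batch() reduces to {"images": images.tensor}
    inputs = [{"image": torch.zeros(3, 800, 608), "height": 800, "width": 608}]
    outputs = model(inputs)   # list with one dict per image, each carrying an "instances" field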
# -------------------------------------------------------------------------------- # VIT: Multi-Path Vision Transformer for Dense Prediction # Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI). # All Rights Reserved. # Written by Youngwan Lee # This source code is licensed(Dual License(GPL3.0 & Commercial)) under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------------------------------- # References: # timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm # CoaT: https://github.com/mlpc-ucsd/CoaT # -------------------------------------------------------------------------------- import torch from detectron2.layers import ( ShapeSpec, ) from detectron2.modeling import Backbone, BACKBONE_REGISTRY, FPN from detectron2.modeling.backbone.fpn import LastLevelP6P7, LastLevelMaxPool from .beit import beit_base_patch16, dit_base_patch16, dit_large_patch16, beit_large_patch16 from .deit import deit_base_patch16, mae_base_patch16 from layoutlmft.models.layoutlmv3 import LayoutLMv3Model from transformers import AutoConfig __all__ = [ "build_vit_fpn_backbone", ] class VIT_Backbone(Backbone): """ Implement VIT backbone. """ def __init__(self, name, out_features, drop_path, img_size, pos_type, model_kwargs, config_path=None, image_only=False, cfg=None): super().__init__() self._out_features = out_features if 'base' in name: self._out_feature_strides = {"layer3": 4, "layer5": 8, "layer7": 16, "layer11": 32} self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768} else: self._out_feature_strides = {"layer7": 4, "layer11": 8, "layer15": 16, "layer23": 32} self._out_feature_channels = {"layer7": 1024, "layer11": 1024, "layer15": 1024, "layer23": 1024} if name == 'beit_base_patch16': model_func = beit_base_patch16 elif name == 'dit_base_patch16': model_func = dit_base_patch16 elif name == "deit_base_patch16": model_func = deit_base_patch16 elif name == "mae_base_patch16": model_func = mae_base_patch16 elif name == "dit_large_patch16": model_func = dit_large_patch16 elif name == "beit_large_patch16": model_func = beit_large_patch16 if 'beit' in name or 'dit' in name: if pos_type == "abs": self.backbone = model_func(img_size=img_size, out_features=out_features, drop_path_rate=drop_path, use_abs_pos_emb=True, **model_kwargs) elif pos_type == "shared_rel": self.backbone = model_func(img_size=img_size, out_features=out_features, drop_path_rate=drop_path, use_shared_rel_pos_bias=True, **model_kwargs) elif pos_type == "rel": self.backbone = model_func(img_size=img_size, out_features=out_features, drop_path_rate=drop_path, use_rel_pos_bias=True, **model_kwargs) else: raise ValueError() elif "layoutlmv3" in name: config = AutoConfig.from_pretrained(config_path) # disable relative bias as DiT config.has_spatial_attention_bias = False config.has_relative_attention_bias = False self.backbone = LayoutLMv3Model(config, detection=True, out_features=out_features, image_only=image_only) else: self.backbone = model_func(img_size=img_size, out_features=out_features, drop_path_rate=drop_path, **model_kwargs) self.name = name def forward(self, x): """ Args: x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. 
Returns: dict[str->Tensor]: names and the corresponding features """ if "layoutlmv3" in self.name: return self.backbone.forward( input_ids=x["input_ids"] if "input_ids" in x else None, bbox=x["bbox"] if "bbox" in x else None, images=x["images"] if "images" in x else None, attention_mask=x["attention_mask"] if "attention_mask" in x else None, # output_hidden_states=True, ) assert x.dim() == 4, f"VIT takes an input of shape (N, C, H, W). Got {x.shape} instead!" return self.backbone.forward_features(x) def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } def build_VIT_backbone(cfg): """ Create a VIT instance from config. Args: cfg: a detectron2 CfgNode Returns: A VIT backbone instance. """ # fmt: off name = cfg.MODEL.VIT.NAME out_features = cfg.MODEL.VIT.OUT_FEATURES drop_path = cfg.MODEL.VIT.DROP_PATH img_size = cfg.MODEL.VIT.IMG_SIZE pos_type = cfg.MODEL.VIT.POS_TYPE model_kwargs = eval(str(cfg.MODEL.VIT.MODEL_KWARGS).replace("`", "")) if 'layoutlmv3' in name: if cfg.MODEL.CONFIG_PATH != '': config_path = cfg.MODEL.CONFIG_PATH else: config_path = cfg.MODEL.WEIGHTS.replace('pytorch_model.bin', '') # layoutlmv3 pre-trained models config_path = config_path.replace('model_final.pth', '') # detection fine-tuned models else: config_path = None return VIT_Backbone(name, out_features, drop_path, img_size, pos_type, model_kwargs, config_path=config_path, image_only=cfg.MODEL.IMAGE_ONLY, cfg=cfg) @BACKBONE_REGISTRY.register() def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec): """ Create a VIT w/ FPN backbone. Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. """ bottom_up = build_VIT_backbone(cfg) in_features = cfg.MODEL.FPN.IN_FEATURES out_channels = cfg.MODEL.FPN.OUT_CHANNELS backbone = FPN( bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelMaxPool(), fuse_type=cfg.MODEL.FPN.FUSE_TYPE, ) return backbone
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/backbone.py
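build_vit_fpn_backbone is driven entirely by cfg.MODEL.VIT.* and cfg.MODEL.FPN.*. The sketch below lists the fields it reads, with illustrative values; add_vit_config from this package is assumed to register the VIT node and defaults such as MODEL_KWARGS, CONFIG_PATH and IMAGE_ONLY, and the package is assumed to be importable as `ditod`.

# Sketch of the config surface consumed above; values are illustrative, not from a shipped config.
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from ditod import add_vit_config, build_vit_fpn_backbone

cfg = get_cfg()
add_vit_config(cfg)                                     # assumed to add MODEL.VIT.*, MODEL.CONFIG_PATH, MODEL.IMAGE_ONLY
cfg.MODEL.VIT.NAME = "dit_base_patch16"                 # 'layoutlmv3' in the name switches to the LayoutLMv3 branch
cfg.MODEL.VIT.OUT_FEATURES = ["layer3", "layer5", "layer7", "layer11"]
cfg.MODEL.VIT.DROP_PATH = 0.1
cfg.MODEL.VIT.POS_TYPE = "abs"                          # "abs" | "shared_rel" | "rel" for the BEiT/DiT models
cfg.MODEL.FPN.IN_FEATURES = ["layer3", "layer5", "layer7", "layer11"]
cfg.MODEL.FPN.OUT_CHANNELS = 256

backbone = build_vit_fpn_backbone(cfg, ShapeSpec(channels=3))  # input_shape is not used by this builder
print(backbone.output_shape())                          # FPN feature levels with their strides/channels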
# --------------------------------------------------------------------------------
# MPViT: Multi-Path Vision Transformer for Dense Prediction
# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI).
# All Rights Reserved.
# Written by Youngwan Lee
# This source code is licensed(Dual License(GPL3.0 & Commercial)) under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------------------------------

from .config import add_vit_config
from .backbone import build_vit_fpn_backbone
from .dataset_mapper import DetrDatasetMapper
from .mycheckpointer import MyDetectionCheckpointer
from .icdar_evaluation import ICDAREvaluator
from .mytrainer import MyTrainer
from .table_evaluation import calc_table_score
from .rcnn_vl import VLGeneralizedRCNN
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/__init__.py
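Taken together, the exports above are what a detectron2 training entry point needs. A minimal driver sketch in the spirit of detectron2's train_net.py; the config file is supplied on the command line and the launch flags come from default_argument_parser defined later in this package.

# Minimal training entry point built on the ditod exports (illustrative skeleton only).
from detectron2.config import get_cfg
from detectron2.engine import launch
from ditod import add_vit_config, MyTrainer
from ditod.mytrainer import default_argument_parser, default_setup  # helpers defined in mytrainer.py

def main(args):
    cfg = get_cfg()
    add_vit_config(cfg)                    # register MODEL.VIT.* and related keys
    cfg.merge_from_file(args.config_file)  # YAML path passed via --config-file
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)               # logging, config dump, seeding
    trainer = MyTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(main, args.num_gpus, num_machines=args.num_machines,
           machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,))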
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # from https://github.com/facebookresearch/detr/blob/main/d2/detr/dataset_mapper.py import copy import logging import numpy as np import torch from detectron2.data import detection_utils as utils from detectron2.data import transforms as T from layoutlmft import LayoutLMv3Tokenizer __all__ = ["DetrDatasetMapper"] def build_transform_gen(cfg, is_train, aug_flip_crop=True): """ Create a list of :class:`TransformGen` from config. Returns: list[TransformGen] """ if is_train: min_size = cfg.INPUT.MIN_SIZE_TRAIN max_size = cfg.INPUT.MAX_SIZE_TRAIN sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING else: min_size = cfg.INPUT.MIN_SIZE_TEST max_size = cfg.INPUT.MAX_SIZE_TEST sample_style = "choice" if sample_style == "range": assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) logger = logging.getLogger(__name__) tfm_gens = [] if is_train and aug_flip_crop: tfm_gens.append(T.RandomFlip()) tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) if is_train: logger.info("TransformGens used in training: " + str(tfm_gens)) return tfm_gens class DetrDatasetMapper: """ A callable which takes a dataset dict in Detectron2 Dataset format, and map it into a format used by DETR. The callable currently does the following: 1. Read the image from "file_name" 2. Applies geometric transforms to the image and annotation 3. Find and applies suitable cropping to the image and annotation 4. Prepare image and annotation to Tensors """ def __init__(self, cfg, is_train=True): self.img_format = cfg.INPUT.FORMAT self.is_train = is_train self.layoutlmv3 = 'layoutlmv3' in cfg.MODEL.VIT.NAME if self.layoutlmv3: # We disable the flipping/cropping augmentation in layoutlmv3 to be consistent with pre-training # Note that we do not disable resizing augmentation since the text boxes are also resized/normalized. aug_flip_crop = False else: aug_flip_crop = True if cfg.INPUT.CROP.ENABLED and is_train and aug_flip_crop: self.crop_gen = [ T.ResizeShortestEdge([400, 500, 600], sample_style="choice"), T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE), ] else: self.crop_gen = None self.mask_on = cfg.MODEL.MASK_ON self.tfm_gens = build_transform_gen(cfg, is_train, aug_flip_crop) logging.getLogger(__name__).info( "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen)) ) def __call__(self, dataset_dict): """ Args: dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. Returns: dict: a format that builtin models in detectron2 accept """ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below image = utils.read_image(dataset_dict["file_name"], format=self.img_format) utils.check_image_size(dataset_dict, image) if self.crop_gen is None: image, transforms = T.apply_transform_gens(self.tfm_gens, image) else: if np.random.rand() > 0.5: image, transforms = T.apply_transform_gens(self.tfm_gens, image) else: image, transforms = T.apply_transform_gens( self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image ) image_shape = image.shape[:2] # h, w # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, # but not efficient on large generic data structures due to the use of pickle & mp.Queue. # Therefore it's important to use torch.Tensor. dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) if not self.is_train: # USER: Modify this if you want to keep them for some reason. 
dataset_dict.pop("annotations", None) return dataset_dict if "annotations" in dataset_dict: # USER: Modify this if you want to keep them for some reason. for anno in dataset_dict["annotations"]: if not self.mask_on: anno.pop("segmentation", None) anno.pop("keypoints", None) # USER: Implement additional transformations if you have other types of data annos = [ utils.transform_instance_annotations(obj, transforms, image_shape) for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0 ] instances = utils.annotations_to_instances(annos, image_shape) dataset_dict["instances"] = utils.filter_empty_instances(instances) return dataset_dict
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/dataset_mapper.py
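MyTrainer (later in this package) only swaps this mapper in when cfg.AUG.DETR is true; used directly it looks like the sketch below, with the flip/crop generators dropped automatically for LayoutLMv3 backbones. `cfg` is assumed to be a fully populated config and the dataset name is a placeholder.

# Sketch: wiring DetrDatasetMapper into detectron2's data loaders (mirrors MyTrainer.build_*_loader).
# `cfg` is assumed to be a complete config; "publaynet_val" is a placeholder dataset name.
from detectron2.data import build_detection_train_loader, build_detection_test_loader
from ditod import DetrDatasetMapper

train_loader = build_detection_train_loader(cfg, mapper=DetrDatasetMapper(cfg, is_train=True))
test_loader = build_detection_test_loader(cfg, "publaynet_val", mapper=DetrDatasetMapper(cfg, is_train=False))
# With 'layoutlmv3' in cfg.MODEL.VIT.NAME, RandomFlip/RandomCrop are skipped to stay consistent
# with pre-training, while ResizeShortestEdge still resizes the images (and hence the text boxes).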
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. """ This file contains components with some default boilerplate logic user may need in training / testing. They will not work for everyone, but many users may find them useful. The behavior of functions/classes in this file is subject to change, since they are meant to represent the "common default behavior" people need in their projects. """ import argparse import logging import os import sys import time import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .icdar_evaluation import ICDAREvaluator from detectron2.evaluation import COCOEvaluator __all__ = [ "create_ddp_model", "default_argument_parser", "default_setup", "default_writers", "DefaultPredictor", "MyTrainer", ] def create_ddp_model(model, *, fp16_compression=False, **kwargs): """ Create a DistributedDataParallel model if there are >1 processes. Args: model: a torch.nn.Module fp16_compression: add fp16 compression hooks to the ddp object. See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. """ # noqa if comm.get_world_size() == 1: return model if "device_ids" not in kwargs: kwargs["device_ids"] = [comm.get_local_rank()] ddp = DistributedDataParallel(model, **kwargs) if fp16_compression: from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook) return ddp def default_argument_parser(epilog=None): """ Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. 
Returns: argparse.ArgumentParser: """ parser = argparse.ArgumentParser( epilog=epilog or f""" Examples: Run on single machine: $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml Change some config options: $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001 Run on multiple machines: (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags] (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags] """, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") parser.add_argument( "--resume", action="store_true", help="Whether to attempt to resume from the checkpoint directory. " "See documentation of `MyTrainer.resume_or_load()` for what it means.", ) parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") parser.add_argument( "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" ) # PyTorch still may leave orphan processes in multi-gpu training. # Therefore we use a deterministic way to obtain port, # so that users are aware of orphan processes by seeing the port occupied. port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14 parser.add_argument( "--dist-url", default="tcp://127.0.0.1:{}".format(port), help="initialization URL for pytorch distributed backend. See " "https://pytorch.org/docs/stable/distributed.html for details.", ) parser.add_argument( "opts", help=""" Modify config options at the end of the command. For Yacs configs, use space-separated "PATH.KEY VALUE" pairs. For python-based LazyConfig, use "path.key=value". """.strip(), default=None, nargs=argparse.REMAINDER, ) return parser def _try_get_key(cfg, *keys, default=None): """ Try select keys from cfg until the first key that exists. Otherwise return default. """ if isinstance(cfg, CfgNode): cfg = OmegaConf.create(cfg.dump()) for k in keys: none = object() p = OmegaConf.select(cfg, k, default=none) if p is not none: return p return default def _highlight(code, filename): try: import pygments except ImportError: return code from pygments.lexers import Python3Lexer, YamlLexer from pygments.formatters import Terminal256Formatter lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer() code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai")) return code def default_setup(cfg, args): """ Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged """ output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir") if comm.is_main_process() and output_dir: PathManager.mkdirs(output_dir) rank = comm.get_rank() setup_logger(output_dir, distributed_rank=rank, name="fvcore") logger = setup_logger(output_dir, distributed_rank=rank) logger.info("Rank of current process: {}. 
World size: {}".format(rank, comm.get_world_size())) logger.info("Environment info:\n" + collect_env_info()) logger.info("Command line arguments: " + str(args)) if hasattr(args, "config_file") and args.config_file != "": logger.info( "Contents of args.config_file={}:\n{}".format( args.config_file, _highlight(PathManager.open(args.config_file, "r").read(), args.config_file), ) ) if comm.is_main_process() and output_dir: # Note: some of our scripts may expect the existence of # config.yaml in output directory path = os.path.join(output_dir, "config.yaml") if isinstance(cfg, CfgNode): logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml"))) with PathManager.open(path, "w") as f: f.write(cfg.dump()) else: LazyConfig.save(cfg, path) logger.info("Full config saved to {}".format(path)) # make sure each worker has a different, yet deterministic seed if specified seed = _try_get_key(cfg, "SEED", "train.seed", default=-1) seed_all_rng(None if seed < 0 else seed + rank) # cudnn benchmark has large overhead. It shouldn't be used considering the small size of # typical validation set. if not (hasattr(args, "eval_only") and args.eval_only): torch.backends.cudnn.benchmark = _try_get_key( cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False ) def default_writers(output_dir: str, max_iter: Optional[int] = None): """ Build a list of :class:`EventWriter` to be used. It now consists of a :class:`CommonMetricPrinter`, :class:`TensorboardXWriter` and :class:`JSONWriter`. Args: output_dir: directory to store JSON metrics and tensorboard events max_iter: the total number of iterations Returns: list[EventWriter]: a list of :class:`EventWriter` objects. """ PathManager.mkdirs(output_dir) return [ # It may not always print what you want to see, since it prints "common" metrics only. CommonMetricPrinter(max_iter), JSONWriter(os.path.join(output_dir, "metrics.json")), TensorboardXWriter(output_dir), ] class DefaultPredictor: """ Create a simple end-to-end predictor with the given config that runs on single device for a single input image. Compared to using the model directly, this class does the following additions: 1. Load checkpoint from `cfg.MODEL.WEIGHTS`. 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`. 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`. 4. Take one input image and produce a single output, instead of a batch. This is meant for simple demo purposes, so it does the above steps automatically. This is not meant for benchmarks or running complicated inference logic. If you'd like to do anything more complicated, please refer to its source code as examples to build and use the model manually. Attributes: metadata (Metadata): the metadata of the underlying dataset, obtained from cfg.DATASETS.TEST. 
Examples: :: pred = DefaultPredictor(cfg) inputs = cv2.imread("input.jpg") outputs = pred(inputs) """ def __init__(self, cfg): self.cfg = cfg.clone() # cfg can be modified by model self.model = build_model(self.cfg) self.model.eval() if len(cfg.DATASETS.TEST): self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) checkpointer = DetectionCheckpointer(self.model) checkpointer.load(cfg.MODEL.WEIGHTS) self.aug = T.ResizeShortestEdge( [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST ) self.input_format = cfg.INPUT.FORMAT assert self.input_format in ["RGB", "BGR"], self.input_format def __call__(self, original_image): """ Args: original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). Returns: predictions (dict): the output of the model for one image only. See :doc:`/tutorials/models` for details about the format. """ with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258 # Apply pre-processing to image. if self.input_format == "RGB": # whether the model expects BGR inputs or RGB original_image = original_image[:, :, ::-1] height, width = original_image.shape[:2] image = self.aug.get_transform(original_image).apply_image(original_image) image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) inputs = {"image": image, "height": height, "width": width} predictions = self.model([inputs])[0] return predictions class MyTrainer(TrainerBase): """ A trainer with default training logic. It does the following: 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader defined by the given config. Create a LR scheduler defined by the config. 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when `resume_or_load` is called. 3. Register a few common hooks defined by the config. It is created to simplify the **standard model training workflow** and reduce code boilerplate for users who only need the standard training workflow, with standard features. It means this class makes *many assumptions* about your training logic that may easily become invalid in a new research. In fact, any assumptions beyond those made in the :class:`SimpleTrainer` are too much for research. The code of this class has been annotated about restrictive assumptions it makes. When they do not work for you, you're encouraged to: 1. Overwrite methods of this class, OR: 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and nothing else. You can then add your own hooks if needed. OR: 3. Write your own training loop similar to `tools/plain_train_net.py`. See the :doc:`/tutorials/training` tutorials for more details. Note that the behavior of this class, like other functions/classes in this file, is not stable, since it is meant to represent the "common default behavior". It is only guaranteed to work well with the standard models and training workflow in detectron2. To obtain more stable behavior, write your own training logic with other public APIs. Examples: :: trainer = MyTrainer(cfg) trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS trainer.train() Attributes: scheduler: checkpointer (DetectionCheckpointer): cfg (CfgNode): """ def __init__(self, cfg): """ Args: cfg (CfgNode): """ super().__init__() logger = logging.getLogger("detectron2") if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2 setup_logger() cfg = MyTrainer.auto_scale_workers(cfg, comm.get_world_size()) self.cfg = cfg # Assume these objects must be constructed in this order. 
model = self.build_model(cfg) optimizer = self.build_optimizer(cfg, model) data_loader = self.build_train_loader(cfg) model = create_ddp_model(model, broadcast_buffers=False) self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)( model, data_loader, optimizer ) self.scheduler = self.build_lr_scheduler(cfg, optimizer) self.checkpointer = MyDetectionCheckpointer( # Assume you want to save checkpoints together with logs/statistics model, cfg.OUTPUT_DIR, trainer=weakref.proxy(self), ) self.start_iter = 0 self.max_iter = cfg.SOLVER.MAX_ITER self.cfg = cfg self.register_hooks(self.build_hooks()) def resume_or_load(self, resume=True): """ If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by a `last_checkpoint` file), resume from the file. Resuming means loading all available states (eg. optimizer and scheduler) and update iteration counter from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used. Otherwise, this is considered as an independent training. The method will load model weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start from iteration 0. Args: resume (bool): whether to do resume or not """ self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume) if resume and self.checkpointer.has_checkpoint(): # The checkpoint stores the training iteration that just finished, thus we start # at the next iteration self.start_iter = self.iter + 1 def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def build_writers(self): """ Build a list of writers to be used using :func:`default_writers()`. If you'd like a different list of writers, you can overwrite it in your trainer. Returns: list[EventWriter]: a list of :class:`EventWriter` objects. """ return default_writers(self.cfg.OUTPUT_DIR, self.max_iter) def train(self): """ Run training. Returns: OrderedDict of results, if evaluation is enabled. Otherwise None. 
""" super().train(self.start_iter, self.max_iter) if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process(): assert hasattr( self, "_last_eval_results" ), "No evaluation results obtained during training!" verify_results(self.cfg, self._last_eval_results) return self._last_eval_results def run_step(self): self._trainer.iter = self.iter if self.cfg.SOLVER.GRADIENT_ACCUMULATION_STEPS == 1: self._trainer.run_step() else: self.run_step_grad_acc(gradient_accumulation_steps=self.cfg.SOLVER.GRADIENT_ACCUMULATION_STEPS) def run_step_grad_acc(self, gradient_accumulation_steps=1): """ Implement the AMP training logic. The batch at each step will be divided by this integer and gradient will be accumulated over gradient_accumulation_steps steps. """ assert self._trainer.model.training, "[AMPTrainer] model was changed to eval mode!" assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!" from torch.cuda.amp import autocast start = time.perf_counter() data = next(self._trainer._data_loader_iter) data_time = time.perf_counter() - start with autocast(): loss_dict = self._trainer.model(data) if isinstance(loss_dict, torch.Tensor): loss_dict = loss_dict / gradient_accumulation_steps losses = loss_dict loss_dict = {"total_loss": loss_dict} else: losses = sum(loss_dict.values()) losses = losses / gradient_accumulation_steps self._trainer.grad_scaler.scale(losses).backward() if (self._trainer.iter + 1) % gradient_accumulation_steps == 0: self._trainer._write_metrics(loss_dict, data_time) self._trainer.grad_scaler.step(self._trainer.optimizer) self._trainer.grad_scaler.update() self._trainer.optimizer.zero_grad() @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. 
""" model = build_model(cfg) logger = logging.getLogger(__name__) logger.info("Model:\n{}".format(model)) return model @classmethod def build_optimizer(cls, cfg, model): params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for key, value in model.named_parameters(recurse=True): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) lr = cfg.SOLVER.BASE_LR weight_decay = cfg.SOLVER.WEIGHT_DECAY if "backbone" in key: lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_train_loader(cls, cfg): if cfg.AUG.DETR: mapper = DetrDatasetMapper(cfg, is_train=True) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): """ Returns: iterable It now calls :func:`detectron2.data.build_detection_test_loader`. Overwrite it if you'd like a different data loader. """ if cfg.AUG.DETR: mapper = DetrDatasetMapper(cfg, is_train=False) else: mapper = None return build_detection_test_loader(cfg, dataset_name, mapper=mapper) # Better to use mapper, which is the same as training. # The mapper does not influence the DiT model, but it is necessary for the LayoutLMv3 model # since we customize something in mapper. # return build_detection_test_loader(cfg, dataset_name) @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") if 'icdar' not in dataset_name: return COCOEvaluator(dataset_name, output_dir=output_folder) else: return ICDAREvaluator(dataset_name, output_dir=output_folder) @classmethod def test(cls, cfg, model, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator(cfg, dataset_name) except NotImplementedError: logger.warn( "No evaluator found. Use `MyTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info("Evaluation results for {} in csv format:".format(dataset_name)) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results @staticmethod def auto_scale_workers(cfg, num_workers: int): """ When the config is defined for certain number of workers (according to ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of workers currently in use, returns a new cfg where the total batch size is scaled so that the per-GPU batch size stays the same as the original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``. Other config options are also scaled accordingly: * training steps and warmup steps are scaled inverse proportionally. * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`. For example, with the original config like the following: .. code-block:: yaml IMS_PER_BATCH: 16 BASE_LR: 0.1 REFERENCE_WORLD_SIZE: 8 MAX_ITER: 5000 STEPS: (4000,) CHECKPOINT_PERIOD: 1000 When this config is used on 16 GPUs instead of the reference number 8, calling this method will return a new config with: .. code-block:: yaml IMS_PER_BATCH: 32 BASE_LR: 0.2 REFERENCE_WORLD_SIZE: 16 MAX_ITER: 2500 STEPS: (2000,) CHECKPOINT_PERIOD: 500 Note that both the original config and this new config can be trained on 16 GPUs. It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``). Returns: CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``. """ old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE if old_world_size == 0 or old_world_size == num_workers: return cfg cfg = cfg.clone() frozen = cfg.is_frozen() cfg.defrost() assert ( cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0 ), "Invalid REFERENCE_WORLD_SIZE in config!" 
scale = num_workers / old_world_size bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale)) lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale)) warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale)) cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS) cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale)) cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale)) cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant logger = logging.getLogger(__name__) logger.info( f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, " f"max_iter={max_iter}, warmup={warmup_iter}." ) if frozen: cfg.freeze() return cfg # Access basic attributes from the underlying trainer for _attr in ["model", "data_loader", "optimizer"]: setattr( MyTrainer, _attr, property( # getter lambda self, x=_attr: getattr(self._trainer, x), # setter lambda self, value, x=_attr: setattr(self._trainer, x, value), ), )
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/mytrainer.py
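Besides the standard detectron2 loop, MyTrainer adds AMP gradient accumulation: when cfg.SOLVER.GRADIENT_ACCUMULATION_STEPS is N > 1, run_step_grad_acc scales each loss by 1/N and only steps and updates the GradScaler every N iterations. Below is a standalone sketch of that pattern in plain PyTorch, not the trainer's exact internals; model, optimizer, data_loader_iter and max_iter are assumed to exist.

# Standalone sketch of the AMP + gradient-accumulation pattern from run_step_grad_acc.
# `model`, `optimizer`, `data_loader_iter`, `max_iter` are assumed; N mirrors
# cfg.SOLVER.GRADIENT_ACCUMULATION_STEPS.
from torch.cuda.amp import autocast, GradScaler

N = 4                                         # accumulation steps (illustrative)
scaler = GradScaler()

for it in range(max_iter):
    data = next(data_loader_iter)
    with autocast():
        loss_dict = model(data)               # detectron2 models return a dict of losses in training mode
        loss = sum(loss_dict.values()) / N    # scale so accumulated grads match one large batch
    scaler.scale(loss).backward()             # gradients accumulate across the N micro-steps
    if (it + 1) % N == 0:
        scaler.step(optimizer)                # unscale + optimizer step every N-th iteration
        scaler.update()
        optimizer.zero_grad()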
from .evaluate import calc_table_score
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/table_evaluation/__init__.py
""" Evaluation of -.tar.gz file. Yu Fang - March 2019 """ import os import xml.dom.minidom # from eval import eval if os.path.exists("/mnt/localdata/Users/junlongli/projects/datasets/icdar2019"): PATH = "/mnt/localdata/Users/junlongli/projects/datasets/icdar2019/trackA_modern/test" else: PATH = "/mnt/data/data/icdar2019/trackA_modern/test" reg_gt_path = os.path.abspath(PATH) reg_gt_path_archival = os.path.abspath(PATH) reg_gt_path_modern = os.path.abspath(PATH) str_gt_path_1 = os.path.abspath(PATH) str_gt_path_2 = os.path.abspath(PATH) str_gt_path_archival = os.path.abspath(PATH) str_gt_path_modern = os.path.abspath(PATH) import xml.dom.minidom # from functools import cmp_to_key from os.path import join as osj from .data_structure import * class eval: STR = "-str" REG = "-reg" DEFAULT_ENCODING = "UTF-8" # reg_gt_path = "./annotations/trackA/" # str_gt_path = "./annotations/trackB/" # reg_gt_path = os.path.abspath("data/test") # reg_gt_path_archival = os.path.abspath("data/test") # reg_gt_path_modern = os.path.abspath("data/test") # str_gt_path_1 = os.path.abspath("data/test") # str_gt_path_2 = os.path.abspath("data/test") # str_gt_path_archival = os.path.abspath("data/test") # str_gt_path_modern = os.path.abspath("data/test") # dummyDom = xml.dom.minidom.parse("./dummyXML.xml") def __init__(self, track, res_path): self.return_result = None self.reg = True self.str = False self.resultFile = res_path self.inPrefix = os.path.split(res_path)[-1].split(".")[0][:-7] if track == "-trackA": self.reg = True self.GTFile = osj(reg_gt_path, self.inPrefix + ".xml") # self.GTFile = osj(self.reg_gt_path, self.inPrefix) elif track == "-trackA1": # archival documents self.reg = True self.GTFile = osj(reg_gt_path_archival, self.inPrefix + ".xml") elif track == "-trackA2": # modern documents self.reg = True self.GTFile = osj(reg_gt_path_modern, self.inPrefix + ".xml") elif track == "-trackB1": self.str = True self.GTFile = osj(str_gt_path_1, self.inPrefix + ".xml") # self.GTFile = osj(self.str_gt_path_1, self.inPrefix) elif track == "-trackB2": self.str = True self.GTFile = osj(str_gt_path_2, self.inPrefix + ".xml") # print(self.GTFile) # self.GTFile = osj(self.str_gt_path_2, self.inPrefix) elif track == "-trackB2_a": self.str = True self.GTFile = osj(str_gt_path_archival, self.inPrefix + ".xml") elif track == "-trackB2_m": self.str = True self.GTFile = osj(str_gt_path_modern, self.inPrefix + ".xml") else: print(track) print("Not a valid track, please check your spelling.") # self.resultFile = res_path # self.inPrefix = os.path.split(res_path)[-1].split("-")[0] # if self.str: # # self.GTFile = osj(self.str_gt_path, self.inPrefix + "-str.xml") # self.GTFile = osj(self.str_gt_path, self.inPrefix + ".xml") # elif self.reg: # # self.GTFile = osj(self.reg_gt_path, self.inPrefix + "-reg.xml") # self.GTFile = osj(self.reg_gt_path, self.inPrefix + ".xml") # else: # print("Not a valid track, please check your spelling.") self.gene_ret_lst() @property def result(self): return self.return_result def gene_ret_lst(self): ret_lst = [] for iou in [0.6, 0.7, 0.8, 0.9]: temp = self.compute_retVal(iou) ret_lst.append(temp) # ret_lst.append(self.compute_retVal(iou)) ret_lst.append(self.inPrefix + ".xml") # ret_lst.append(self.inPrefix) # print("Done processing {}\n".format(self.resultFile)) self.return_result = ret_lst def compute_retVal(self, iou): gt_dom = xml.dom.minidom.parse(self.GTFile) # incorrect submission format handling try: result_dom = xml.dom.minidom.parse(self.resultFile) except Exception as e: # result_dom = 
xml.dom.minidom.parse(dummyDom) gt_tables = eval.get_table_list(gt_dom) retVal = ResultStructure(truePos=0, gtTotal=len(gt_tables), resTotal=0) return retVal # result_dom = xml.dom.minidom.parse(self.resultFile) if self.reg: ret = self.evaluate_result_reg(gt_dom, result_dom, iou) return ret if self.str: ret = self.evaluate_result_str(gt_dom, result_dom, iou) return ret @staticmethod def get_table_list(dom): """ return a list of Table objects corresponding to the table element of the DOM. """ return [Table(_nd) for _nd in dom.documentElement.getElementsByTagName("table")] @staticmethod def evaluate_result_reg(gt_dom, result_dom, iou_value): # parse the tables in input elements gt_tables = eval.get_table_list(gt_dom) result_tables = eval.get_table_list(result_dom) # duplicate result table list remaining_tables = result_tables.copy() # map the tables in gt and result file table_matches = [] # @param: table_matches - list of mapping of tables in gt and res file, in order (gt, res) for gtt in gt_tables: for rest in remaining_tables: if gtt.compute_table_iou(rest) >= iou_value: remaining_tables.remove(rest) table_matches.append((gtt, rest)) break assert len(table_matches) <= len(gt_tables) assert len(table_matches) <= len(result_tables) retVal = ResultStructure(truePos=len(table_matches), gtTotal=len(gt_tables), resTotal=len(result_tables)) return retVal @staticmethod def evaluate_result_str(gt_dom, result_dom, iou_value, table_iou_value=0.8): # parse the tables in input elements gt_tables = eval.get_table_list(gt_dom) result_tables = eval.get_table_list(result_dom) # duplicate result table list remaining_tables = result_tables.copy() gt_remaining = gt_tables.copy() # map the tables in gt and result file table_matches = [] # @param: table_matches - list of mapping of tables in gt and res file, in order (gt, res) for gtt in gt_remaining: for rest in remaining_tables: # note: for structural analysis, use 0.8 for table mapping if gtt.compute_table_iou(rest) >= table_iou_value: table_matches.append((gtt, rest)) remaining_tables.remove(rest) # unsafe... 
should be ok with the break below gt_remaining.remove(gtt) break total_gt_relation, total_res_relation, total_correct_relation = 0, 0, 0 for gt_table, ress_table in table_matches: # set up the cell mapping for matching tables cell_mapping = gt_table.find_cell_mapping(ress_table, iou_value) # set up the adj relations, convert the one for result table to a dictionary for faster searching gt_AR = gt_table.find_adj_relations() total_gt_relation += len(gt_AR) res_AR = ress_table.find_adj_relations() total_res_relation += len(res_AR) # Now map GT adjacency relations to result lMappedAR = [] for ar in gt_AR: try: resFromCell = cell_mapping[ar.fromText] resToCell = cell_mapping[ar.toText] # make a mapped adjacency relation lMappedAR.append(AdjRelation(resFromCell, resToCell, ar.direction)) except: # no mapping is possible pass # compare two list of adjacency relation correct_dect = 0 for ar1 in res_AR: for ar2 in lMappedAR: if ar1.isEqual(ar2): correct_dect += 1 break total_correct_relation += correct_dect # handle gt_relations in unmatched gt table for gtt_remain in gt_remaining: total_gt_relation += len(gtt_remain.find_adj_relations()) # handle gt_relation in unmatched res table for res_remain in remaining_tables: total_res_relation += len(res_remain.find_adj_relations()) retVal = ResultStructure(truePos=total_correct_relation, gtTotal=total_gt_relation, resTotal=total_res_relation) return retVal # calculate the gt adj_relations of the missing file # @param: file_lst - list of missing ground truth file # @param: cur_gt_num - current total of ground truth objects (tables / cells) def process_missing_files(track, gt_file_lst, cur_gt_num): if track in ["-trackA", "-trackA1", "-trackA2"]: gt_file_lst_full = [osj(reg_gt_path, filename) for filename in gt_file_lst] for file in gt_file_lst_full: if os.path.split(file)[-1].split(".")[-1] == "xml": gt_dom = xml.dom.minidom.parse(file) gt_root = gt_dom.documentElement # tables = [] table_elements = gt_root.getElementsByTagName("table") for res_table in table_elements: # t = Table(res_table) # tables.append(t) cur_gt_num += 1 return cur_gt_num elif track == "-trackB1": gt_file_lst_full = [osj(str_gt_path_1, filename) for filename in gt_file_lst] for file in gt_file_lst_full: if os.path.split(file)[-1].split(".")[-1] == "xml": gt_dom = xml.dom.minidom.parse(file) gt_root = gt_dom.documentElement tables = [] table_elements = gt_root.getElementsByTagName("table") for res_table in table_elements: t = Table(res_table) tables.append(t) for table in tables: cur_gt_num += len(table.find_adj_relations()) return cur_gt_num elif track == "-trackB2": gt_file_lst_full = [osj(str_gt_path_2, filename) for filename in gt_file_lst] for file in gt_file_lst_full: if os.path.split(file)[-1].split(".")[-1] == "xml": gt_dom = xml.dom.minidom.parse(file) gt_root = gt_dom.documentElement tables = [] table_elements = gt_root.getElementsByTagName("table") for res_table in table_elements: t = Table(res_table) tables.append(t) for table in tables: cur_gt_num += len(table.find_adj_relations()) return cur_gt_num def calc(F1): sum_a = 0.6 * F1[0] + 0.7 * F1[1] + 0.8 * F1[2] + 0.9 * F1[3] sum_b = 0.6 + 0.7 + 0.8 + 0.9 return sum_a / sum_b def calc_table_score(result_path): # measure = eval(*sys.argv[1:]) gt_file_lst = os.listdir(reg_gt_path_archival) track = "-trackA1" untar_path = result_path res_lst = [] for root, files, dirs in os.walk(untar_path): for name in dirs: if name.split(".")[-1] == "xml": cur_filepath = osj(os.path.abspath(root), name) res_lst.append(eval(track, 
cur_filepath)) # printing for debug # print("Processing... {}".format(name)) # print("DONE WITH FILE PROCESSING\n") # note: results are stored as list of each when iou at [0.6, 0.7, 0.8, 0.9, gt_filename] # gt number should be the same for all files gt_num = 0 correct_six, res_six = 0, 0 correct_seven, res_seven = 0, 0 correct_eight, res_eight = 0, 0 correct_nine, res_nine = 0, 0 for each_file in res_lst: # print(each_file) try: gt_file_lst.remove(each_file.result[-1]) if each_file.result[-1].replace('.xml', '.jpg') in gt_file_lst: gt_file_lst.remove(each_file.result[-1].replace('.xml', '.jpg')) correct_six += each_file.result[0].truePos gt_num += each_file.result[0].gtTotal res_six += each_file.result[0].resTotal # print("{} {} {}".format(each_file.result[0].truePos, each_file.result[0].gtTotal, each_file.result[0].resTotal)) correct_seven += each_file.result[1].truePos res_seven += each_file.result[1].resTotal correct_eight += each_file.result[2].truePos res_eight += each_file.result[2].resTotal correct_nine += each_file.result[3].truePos res_nine += each_file.result[3].resTotal except: print("Error occur in processing result list.") print(each_file.result[-1]) break # print(each_file.result[-1]) # print(each_file) # for file in gt_file_lst: # if file.split(".") != "xml": # gt_file_lst.remove(file) # # print(gt_file_lst) for i in range(len(gt_file_lst) - 1, -1, -1): if gt_file_lst[i].split(".")[-1] != "xml": del gt_file_lst[i] if len(gt_file_lst) > 0: print("\nWarning: missing result annotations for file: {}\n".format(gt_file_lst)) gt_total = process_missing_files(track, gt_file_lst, gt_num) else: gt_total = gt_num try: # print("Evaluation of {}".format(track.replace("-", ""))) # iou @ 0.6 p_six = correct_six / res_six r_six = correct_six / gt_total f1_six = 2 * p_six * r_six / (p_six + r_six) print("IOU @ 0.6 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_six, r_six, f1_six)) print("correct: {}, gt: {}, res: {}\n".format(correct_six, gt_total, res_six)) # iou @ 0.7 p_seven = correct_seven / res_seven r_seven = correct_seven / gt_total f1_seven = 2 * p_seven * r_seven / (p_seven + r_seven) print("IOU @ 0.7 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_seven, r_seven, f1_seven)) print("correct: {}, gt: {}, res: {}\n".format(correct_seven, gt_total, res_seven)) # iou @ 0.8 p_eight = correct_eight / res_eight r_eight = correct_eight / gt_total f1_eight = 2 * p_eight * r_eight / (p_eight + r_eight) print("IOU @ 0.8 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_eight, r_eight, f1_eight)) print("correct: {}, gt: {}, res: {}\n".format(correct_eight, gt_total, res_eight)) # iou @ 0.9 p_nine = correct_nine / res_nine r_nine = correct_nine / gt_total f1_nine = 2 * p_nine * r_nine / (p_nine + r_nine) print("IOU @ 0.9 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_nine, r_nine, f1_nine)) print("correct: {}, gt: {}, res: {}".format(correct_nine, gt_total, res_nine)) F1 = [f1_six, f1_seven, f1_eight, f1_nine] wF1 = calc(F1) print("Average weight F1: {}".format(wF1)) return { 'p_six':p_six * 100, "r_six":r_six * 100, "f1_six":f1_six * 100, "p_seven":p_seven * 100, "r_seven":r_seven * 100, "f1_seven":f1_seven * 100, "p_eight":p_eight * 100, "r_eight":r_eight * 100, "f1_eight":f1_eight * 100, "p_nine":p_nine * 100, "r_nine":r_nine * 100, "f1_nine":f1_nine * 100, "wF1":wF1 * 100 } except ZeroDivisionError: print( "Error: zero devision error found, (possible that no adjacency relations are found), please check the file input.") return {"wF1": 0} if __name__=="__main__": pass
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/table_evaluation/evaluate.py
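calc_table_score walks result_path for predicted XMLs, matches them against the hard-coded ICDAR-2019 trackA ground truth, and reports precision/recall/F1 (scaled by 100) at IoU 0.6/0.7/0.8/0.9 plus a weighted F1. A usage sketch; the result directory path is a placeholder.

# Sketch: scoring a directory of predicted table-region XMLs (one XML per test image,
# named like the corresponding ground-truth file).
from ditod.table_evaluation import calc_table_score

scores = calc_table_score("/path/to/predicted_xmls")          # placeholder directory
print(scores["f1_six"], scores["f1_nine"], scores["wF1"])     # F1@0.6, F1@0.9, weighted F1 (all *100)

# The weighted F1 follows calc(F1) above:
#   wF1 = (0.6*F1@0.6 + 0.7*F1@0.7 + 0.8*F1@0.8 + 0.9*F1@0.9) / (0.6 + 0.7 + 0.8 + 0.9)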
""" Data structures used by the evaluation process. Yu Fang - March 2019 """ from collections import Iterable import numpy as np from shapely.geometry import Polygon # helper functions def flatten(lis): for item in lis: if isinstance(item, Iterable) and not isinstance(item, str): for x in flatten(item): yield x else: yield item # derived from https://blog.csdn.net/u012433049/article/details/82909484 def compute_poly_iou(list1, list2): a1 = np.array(list1, dtype=int).reshape(-1, 2) poly1 = Polygon(a1) poly1_clean = poly1.buffer(0) a2 = np.array(list2, dtype=int).reshape(-1, 2) poly2 = Polygon(a2) poly2_clean = poly2.buffer(0) try: # iou = poly1.intersection(poly2).area / poly1.union(poly2).area iou = poly1_clean.intersection(poly2_clean).area / poly1_clean.union(poly2_clean).area except ZeroDivisionError: iou = 0 return iou class Cell(object): # @:param start_row : start row index of the Cell # @:param start_col : start column index of the Cell # @:param end-row : end row index of the Cell # @:param end-col : end column index of the Cell # @:param cell_box: bounding-box of the Cell (coordinates are saved as a string) # @:param content_box: bounding-box of the text content within Cell (unused variable) # @:param cell_id: unique id of the Cell def __init__(self, table_id, start_row, start_col, cell_box, end_row, end_col, content_box=""): self._start_row = int(start_row) self._start_col = int(start_col) self._cell_box = cell_box self._content_box = content_box self._table_id = table_id # the table_id this cell belongs to # self._cell_name = cell_id # specify the cell using passed-in cell_id self._cell_id = id(self) # self._region = region # check for end-row and end-col special case if end_row == -1: self._end_row = self.start_row else: self._end_row = int(end_row) if end_col == -1: self._end_col = self._start_col else: self._end_col = int(end_col) @property def start_row(self): return self._start_row @property def start_col(self): return self._start_col @property def end_row(self): return self._end_row @property def end_col(self): return self._end_col @property def cell_box(self): return self._cell_box @property def content_box(self): return self._content_box @property def cell_id(self): return self._cell_id @property def table_id(self): return self._table_id def __str__(self): return "CELL row=[%d, %d] col=[%d, %d] (coords=%s)" %(self.start_row, self.end_row , self.start_col, self.end_col , self.cell_box) # return the IoU value of two cell blocks def compute_cell_iou(self, another_cell): cell_box_1_temp = [] for el in self.cell_box.split(): cell_box_1_temp.append((el.split(","))) cell_box_1 = list(flatten(cell_box_1_temp)) cell_box_1 = [int(x) for x in cell_box_1] cell_box_2_temp = [] for el in another_cell.cell_box.split(): cell_box_2_temp.append((el.split(","))) cell_box_2 = list(flatten(cell_box_2_temp)) cell_box_2 = [int(x) for x in cell_box_2] return compute_poly_iou(cell_box_1, cell_box_2) # check if the two cell object denotes same cell area in table def check_same(self, another_cell): return self._start_row == another_cell.start_row and self._end_row == another_cell.end_row and \ self._start_col == another_cell.start_col and self._end_col == another_cell.end_col # Note: currently save the relation with two cell object involved, # can be replaced by cell_id in follow-up memory clean up class AdjRelation: DIR_HORIZ = 1 DIR_VERT = 2 def __init__(self, fromText, toText, direction): # @param: fromText, toText are Cell objects (may be changed to cell-ID for further development) 
self._fromText = fromText self._toText = toText self._direction = direction @property def fromText(self): return self._fromText @property def toText(self): return self._toText @property def direction(self): return self._direction def __str__(self): if self.direction == self.DIR_VERT: dir = "vertical" else: dir = "horizontal" return 'ADJ_RELATION: ' + str(self._fromText) + ' ' + str(self._toText) + ' ' + dir def isEqual(self, otherRelation): return self.fromText.cell_id == otherRelation.fromText.cell_id and \ self.toText.cell_id == otherRelation.toText.cell_id and self.direction == otherRelation.direction class Table: def __init__(self, tableNode): self._root = tableNode self._id = id(self) self._table_coords = "" self._maxRow = 0 # PS: indexing from 0 self._maxCol = 0 self._cells = [] # save a table as list of <Cell>s self.adj_relations = [] # save the adj_relations for the table self.parsed = False self.found = False # check if the find_adj_relations() has been called once self.parse_table() def __str__(self): return "TABLE object - {} row x {} col".format(self._maxRow+1, self._maxCol+1) @property def id(self): return self._id @property def table_coords(self): return self._table_coords @property def table_cells(self): return self._cells # parse input xml to cell lists def parse_table(self): # get the table bbox self._table_coords = str(self._root.getElementsByTagName("Coords")[0].getAttribute("points")) # get info for each cell cells = self._root.getElementsByTagName("cell") max_row = max_col = 0 for cell in cells: sr = cell.getAttribute("start-row") sc = cell.getAttribute("start-col") cell_id = cell.getAttribute("id") b_points = str(cell.getElementsByTagName("Coords")[0].getAttribute("points")) # try: # try: # text = cell.getElementsByTagName("content")[0].firstChild.nodeValue # except AttributeError: # text = "" # except IndexError: # text = "initialized cell as no content" er = cell.getAttribute("end-row") if cell.hasAttribute("end-row") else -1 ec = cell.getAttribute("end-col") if cell.hasAttribute("end-col") else -1 new_cell = Cell(table_id=str(self.id), start_row=sr, start_col=sc, cell_box=b_points, end_row=er, end_col=ec) max_row = max(max_row, int(sr), int(er)) max_col = max(max_col, int(sc), int(ec)) self._cells.append(new_cell) self._maxCol = max_col self._maxRow = max_row self.parsed = True # generate a table-like structure for finding adj_relations def convert_2d(self): table = [[0 for x in range(self._maxCol+1)] for y in range(self._maxRow+1)] # init blank cell with int 0 for cell in self._cells: cur_row = cell.start_row while cur_row <= cell.end_row: cur_col = cell.start_col while cur_col <= cell.end_col: temp = table[cur_row][cur_col] if temp == 0: table[cur_row][cur_col] = cell elif type(temp) == list: temp.append(cell) table[cur_row][cur_col] = temp else: table[cur_row][cur_col] = [temp, cell] cur_col += 1 cur_row += 1 return table def find_adj_relations(self): if self.found: return self.adj_relations else: # if len(self._cells) == 0: if self.parsed == False: # fix: cases where there's no cell in table? 
print("table is not parsed for further steps.") self.parse_table() self.find_adj_relations() else: retVal = [] tab = self.convert_2d() # find horizontal relations for r in range(self._maxRow+1): for c_from in range(self._maxCol): temp_pos = tab[r][c_from] if temp_pos == 0: continue elif type(temp_pos) == list: for cell in temp_pos: c_to = c_from + 1 if tab[r][c_to] != 0: # find relation between two adjacent cells if type(tab[r][c_to]) == list: for cell_to in tab[r][c_to]: if cell != cell_to and (not cell.check_same(cell_to)): adj_relation = AdjRelation(cell, cell_to, AdjRelation.DIR_HORIZ) retVal.append(adj_relation) else: if cell != tab[r][c_to]: adj_relation = AdjRelation(cell, tab[r][c_to], AdjRelation.DIR_HORIZ) retVal.append(adj_relation) else: # find the next non-blank cell, if exists for temp in range(c_from + 1, self._maxCol + 1): if tab[r][temp] != 0: if type(tab[r][temp]) == list: for cell_to in tab[r][temp]: adj_relation = AdjRelation(cell, cell_to, AdjRelation.DIR_HORIZ) retVal.append(adj_relation) else: adj_relation = AdjRelation(cell, tab[r][temp], AdjRelation.DIR_HORIZ) retVal.append(adj_relation) break else: c_to = c_from + 1 if tab[r][c_to] != 0: # find relation between two adjacent cells if type(tab[r][c_to]) == list: for cell_to in tab[r][c_to]: if temp_pos != cell_to: adj_relation = AdjRelation(temp_pos, cell_to, AdjRelation.DIR_HORIZ) retVal.append(adj_relation) else: if temp_pos != tab[r][c_to]: adj_relation = AdjRelation(temp_pos, tab[r][c_to], AdjRelation.DIR_HORIZ) retVal.append(adj_relation) else: # find the next non-blank cell, if exists for temp in range(c_from + 1, self._maxCol + 1): if tab[r][temp] != 0: if type(tab[r][temp]) == list: for cell_to in tab[r][temp]: adj_relation = AdjRelation(temp_pos, cell_to, AdjRelation.DIR_HORIZ) retVal.append(adj_relation) else: adj_relation = AdjRelation(temp_pos, tab[r][temp], AdjRelation.DIR_HORIZ) retVal.append(adj_relation) break # find vertical relations for c in range(self._maxCol+1): for r_from in range(self._maxRow): temp_pos = tab[r_from][c] if temp_pos == 0: continue elif type(temp_pos) == list: for cell in temp_pos: r_to = r_from + 1 if tab[r_to][c] != 0: # find relation between two adjacent cells if type(tab[r_to][c]) == list: for cell_to in tab[r_to][c]: if cell != cell_to and (not cell.check_same(cell_to)): adj_relation = AdjRelation(cell, cell_to, AdjRelation.DIR_VERT) retVal.append(adj_relation) else: if cell != tab[r_to][c]: adj_relation = AdjRelation(cell, tab[r_to][c], AdjRelation.DIR_VERT) retVal.append(adj_relation) else: # find the next non-blank cell, if exists for temp in range(r_from + 1, self._maxRow + 1): if tab[temp][c] != 0: if type(tab[temp][c]) == list: for cell_to in tab[temp][c]: adj_relation = AdjRelation(cell, cell_to, AdjRelation.DIR_VERT) retVal.append(adj_relation) else: adj_relation = AdjRelation(cell, tab[temp][c], AdjRelation.DIR_VERT) retVal.append(adj_relation) break else: r_to = r_from + 1 if tab[r_to][c] != 0: # find relation between two adjacent cells if type(tab[r_to][c]) == list: for cell_to in tab[r_to][c]: if temp_pos != cell_to: adj_relation = AdjRelation(temp_pos, cell_to, AdjRelation.DIR_VERT) retVal.append(adj_relation) else: if temp_pos != tab[r_to][c]: adj_relation = AdjRelation(temp_pos, tab[r_to][c], AdjRelation.DIR_VERT) retVal.append(adj_relation) else: # find the next non-blank cell, if exists for temp in range(r_from + 1, self._maxRow + 1): if tab[temp][c] != 0: if type(tab[temp][c]) == list: for cell_to in tab[temp][c]: adj_relation = AdjRelation(temp_pos, 
cell_to, AdjRelation.DIR_VERT) retVal.append(adj_relation) else: adj_relation = AdjRelation(temp_pos, tab[temp][c], AdjRelation.DIR_VERT) retVal.append(adj_relation) break # eliminate duplicates repeat = True while repeat: repeat = False duplicates = [] for ar1 in retVal: for ar2 in retVal: if ar1 != ar2: if ar1.direction == ar2.direction and ar1.fromText == ar2.fromText and\ ar1.toText == ar2.toText: duplicates.append(ar2) break else: continue break if len(duplicates) > 0: repeat = True retVal.remove(duplicates[0]) self.found = True self.adj_relations = retVal return self.adj_relations # compute the IOU of table, pass-in var is another Table object def compute_table_iou(self, another_table): table_box_1_temp = [] for el in self.table_coords.split(): table_box_1_temp.append((el.split(","))) table_box_1 = list(flatten(table_box_1_temp)) table_box_1 = [int(x) for x in table_box_1] table_box_2_temp = [] for el in another_table.table_coords.split(): table_box_2_temp.append((el.split(","))) table_box_2 = list(flatten(table_box_2_temp)) table_box_2 = [int(x) for x in table_box_2] return compute_poly_iou(table_box_1, table_box_2) # find the cell mapping of tables as dictionary, pass-in var is another table and the desired IOU value def find_cell_mapping(self, target_table, iou_value): mapped_cell = [] # store the matches as tuples - (gt, result) mind the order of table when passing in for cell_1 in self.table_cells: for cell_2 in target_table.table_cells: if cell_1.compute_cell_iou(cell_2) >= iou_value: mapped_cell.append((cell_1, cell_2)) break ret = dict(mapped_cell) # print(ret) return ret # to print a table cell mapping @classmethod def printCellMapping(cls, dMappedCell): print("-"*25) for cell1, cell2 in dMappedCell.items(): print(" ", cell1, " --> ", cell2) # to print a table set of adjacency relations @classmethod def printAdjacencyRelationList(cls, lAdjRel, title=""): print("--- %s "%title + "-"*25) for adj in lAdjRel: print(adj) class ResultStructure: def __init__(self, truePos, gtTotal, resTotal): self._truePos = truePos self._gtTotal = gtTotal self._resTotal = resTotal @property def truePos(self): return self._truePos @property def gtTotal(self): return self._gtTotal @property def resTotal(self): return self._resTotal def __str__(self): return "true: {}, gt: {}, res: {}".format(self._truePos, self._gtTotal, self._resTotal)
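# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# it builds a minimal ICDAR-style table fragment in memory and runs it through
# Table / find_adj_relations(). The XML snippet and its coordinates are made-up
# placeholders; only the element and attribute names mirror what parse_table()
# actually reads above.
if __name__ == "__main__":
    from xml.dom import minidom

    sample_xml = (
        '<table>'
        '  <Coords points="0,0 100,0 100,50 0,50"/>'
        '  <cell id="c0" start-row="0" start-col="0">'
        '    <Coords points="0,0 50,0 50,50 0,50"/>'
        '  </cell>'
        '  <cell id="c1" start-row="0" start-col="1">'
        '    <Coords points="50,0 100,0 100,50 50,50"/>'
        '  </cell>'
        '</table>'
    )
    table_node = minidom.parseString(sample_xml).documentElement
    table = Table(table_node)
    print(table)  # TABLE object - 1 row x 2 col
    # one horizontal adjacency relation is expected between the two cells
    Table.printAdjacencyRelationList(table.find_adj_relations(), title="sample table")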
EXA-1-master
exa/models/unilm-master/layoutlmv3/examples/object_detection/ditod/table_evaluation/data_structure.py
# -------------------------------------------------------- # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058) # Github source: https://github.com/microsoft/unilm/tree/master/beats # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Based on fairseq code bases # https://github.com/pytorch/fairseq # -------------------------------------------------------- import math import numpy as np from typing import Dict, Optional, Tuple import torch from torch import Tensor, nn import torch.nn.functional as F from torch.nn import LayerNorm, Parameter from modules import ( GradMultiply, SamePad, get_activation_fn, GLU_Linear, quant_noise, ) class TransformerEncoder(nn.Module): def __init__(self, args): super().__init__() self.dropout = args.dropout self.embedding_dim = args.encoder_embed_dim self.pos_conv = nn.Conv1d( self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=args.conv_pos // 2, groups=args.conv_pos_groups, ) dropout = 0 std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)) nn.init.normal_(self.pos_conv.weight, mean=0, std=std) nn.init.constant_(self.pos_conv.bias, 0) self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2) self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU()) if hasattr(args, "relative_position_embedding"): self.relative_position_embedding = args.relative_position_embedding self.num_buckets = args.num_buckets self.max_distance = args.max_distance else: self.relative_position_embedding = False self.num_buckets = 0 self.max_distance = 0 self.layers = nn.ModuleList( [ TransformerSentenceEncoderLayer( embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first, deep_norm=args.deep_norm, has_relative_attention_bias=self.relative_position_embedding, num_buckets=self.num_buckets, max_distance=self.max_distance, gru_rel_pos=args.gru_rel_pos, encoder_layers=args.encoder_layers, ) for i in range(args.encoder_layers) ] ) if self.relative_position_embedding: for i in range(1, args.encoder_layers): del self.layers[i].self_attn.relative_attention_bias self.layers[i].self_attn.relative_attention_bias = self.layers[0].self_attn.relative_attention_bias self.layer_norm_first = args.layer_norm_first self.layer_norm = LayerNorm(self.embedding_dim) self.layerdrop = args.encoder_layerdrop self.apply(init_bert_params) if args.deep_norm: deep_norm_beta = math.pow(8 * args.encoder_layers, -1 / 4) for i in range(args.encoder_layers): nn.init.xavier_normal_(self.layers[i].self_attn.k_proj.weight, gain=1) nn.init.xavier_normal_(self.layers[i].self_attn.v_proj.weight, gain=deep_norm_beta) nn.init.xavier_normal_(self.layers[i].self_attn.q_proj.weight, gain=1) nn.init.xavier_normal_(self.layers[i].self_attn.out_proj.weight, gain=deep_norm_beta) nn.init.xavier_normal_(self.layers[i].fc1.weight, gain=deep_norm_beta) nn.init.xavier_normal_(self.layers[i].fc2.weight, gain=deep_norm_beta) self.layer_wise_gradient_decay_ratio = getattr(args, "layer_wise_gradient_decay_ratio", 1) def forward(self, x, padding_mask=None, layer=None): x, layer_results = self.extract_features(x, padding_mask, layer) if self.layer_norm_first and layer is None: x = self.layer_norm(x) return x, layer_results def 
extract_features(self, x, padding_mask=None, tgt_layer=None): if padding_mask is not None: x[padding_mask] = 0 x_conv = self.pos_conv(x.transpose(1, 2)) x_conv = x_conv.transpose(1, 2) x = x + x_conv if not self.layer_norm_first: x = self.layer_norm(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) layer_results = [] z = None if tgt_layer is not None: layer_results.append((x, z)) r = None pos_bias = None for i, layer in enumerate(self.layers): if self.layer_wise_gradient_decay_ratio != 1.0: x = GradMultiply.apply(x, self.layer_wise_gradient_decay_ratio) dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, z, pos_bias = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, pos_bias=pos_bias) if tgt_layer is not None: layer_results.append((x, z)) if i == tgt_layer: r = x break if r is not None: x = r # T x B x C -> B x T x C x = x.transpose(0, 1) return x, layer_results class TransformerSentenceEncoderLayer(nn.Module): def __init__( self, embedding_dim: float = 768, ffn_embedding_dim: float = 3072, num_attention_heads: float = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", layer_norm_first: bool = False, deep_norm: bool = False, has_relative_attention_bias: bool = False, num_buckets: int = 0, max_distance: int = 0, rescale_init: bool = False, gru_rel_pos: bool = False, encoder_layers: int = 0, ) -> None: super().__init__() self.embedding_dim = embedding_dim self.dropout = dropout self.activation_dropout = activation_dropout self.activation_name = activation_fn self.activation_fn = get_activation_fn(activation_fn) self.self_attn = MultiheadAttention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, has_relative_attention_bias=has_relative_attention_bias, num_buckets=num_buckets, max_distance=max_distance, rescale_init=rescale_init, gru_rel_pos=gru_rel_pos, ) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(self.activation_dropout) self.dropout3 = nn.Dropout(dropout) self.layer_norm_first = layer_norm_first self.self_attn_layer_norm = LayerNorm(self.embedding_dim) if self.activation_name == "glu": self.fc1 = GLU_Linear(self.embedding_dim, ffn_embedding_dim, "swish") else: self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim) self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim) self.final_layer_norm = LayerNorm(self.embedding_dim) self.deep_norm = deep_norm if self.deep_norm: self.deep_norm_alpha = math.pow(2 * encoder_layers, 1 / 4) else: self.deep_norm_alpha = 1 def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, pos_bias=None ): residual = x if self.layer_norm_first: x = self.self_attn_layer_norm(x) x, attn, pos_bias = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask, position_bias=pos_bias ) x = self.dropout1(x) x = residual + x residual = x x = self.final_layer_norm(x) if self.activation_name == "glu": x = self.fc1(x) else: x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual + x else: x, attn, pos_bias = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=need_weights, attn_mask=self_attn_mask, position_bias=pos_bias ) x = self.dropout1(x) x = residual * 
self.deep_norm_alpha + x x = self.self_attn_layer_norm(x) residual = x if self.activation_name == "glu": x = self.fc1(x) else: x = self.activation_fn(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) x = self.dropout3(x) x = residual * self.deep_norm_alpha + x x = self.final_layer_norm(x) return x, attn, pos_bias class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, has_relative_attention_bias=False, num_buckets=32, max_distance=128, gru_rel_pos=False, rescale_init=False, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = nn.Dropout(dropout) self.has_relative_attention_bias = has_relative_attention_bias self.num_buckets = num_buckets self.max_distance = max_distance if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(num_buckets, num_heads) self.head_dim = embed_dim // num_heads self.q_head_dim = self.head_dim self.k_head_dim = self.head_dim assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) k_bias = True if rescale_init: k_bias = False k_embed_dim = embed_dim q_embed_dim = embed_dim self.k_proj = quant_noise( nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size ) self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.gru_rel_pos = gru_rel_pos if self.gru_rel_pos: self.grep_linear = nn.Linear(self.q_head_dim, 8) self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1)) self.reset_parameters() def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) if self.has_relative_attention_bias: nn.init.xavier_normal_(self.relative_attention_bias.weight) def _relative_positions_bucket(self, relative_positions, bidirectional=True): num_buckets = self.num_buckets 
max_distance = self.max_distance relative_buckets = 0 if bidirectional: num_buckets = num_buckets // 2 relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets relative_positions = torch.abs(relative_positions) else: relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions)) max_exact = num_buckets // 2 is_small = relative_positions < max_exact relative_postion_if_large = max_exact + ( torch.log(relative_positions.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_postion_if_large = torch.min( relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large) return relative_buckets def compute_bias(self, query_length, key_length): context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_positions_bucket( relative_position, bidirectional=True ) relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]) return values def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, position_bias: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. 
""" if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] if self.has_relative_attention_bias and position_bias is None: position_bias = self.compute_bias(tgt_len, src_len) position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling alpha = 32 q *= 1 / alpha if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.q_head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.k_head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, 
saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = (attn_weights - attn_weights.max(dim=-1, keepdim=True)[0]) * alpha attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v, position_bias if position_bias is not None: attn_mask_rel_pos = position_bias if self.gru_rel_pos == 1: query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim) * alpha / self.scaling _B, _H, _L, __ = query_layer.size() gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view( _B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1) gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0 attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, tgt_len, 1) * position_bias attn_mask_rel_pos = attn_mask_rel_pos.view(attn_weights.size()) attn_weights = attn_weights + attn_mask_rel_pos attn_weights_float = F.softmax( attn_weights, dim=-1 ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights, position_bias @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), 
key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def init_bert_params(module): """ Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). """ def normal_(data): # with FSDP, module params will be on CUDA, so we cast them back to CPU # so that the RNG is consistent with and without FSDP data.copy_( data.cpu().normal_(mean=0.0, std=0.02).to(data.device) ) if isinstance(module, nn.Linear): normal_(module.weight.data) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): normal_(module.weight.data) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): normal_(module.q_proj.weight.data) normal_(module.k_proj.weight.data) normal_(module.v_proj.weight.data)
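# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# instantiates a tiny TransformerEncoder from an ad-hoc namespace that carries
# only the fields read in __init__ above. All hyper-parameter values here are
# placeholders chosen so the toy model constructs and runs on CPU.
if __name__ == "__main__":
    from types import SimpleNamespace

    toy_args = SimpleNamespace(
        dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        encoder_embed_dim=64, encoder_ffn_embed_dim=128, encoder_attention_heads=4,
        encoder_layers=2, encoder_layerdrop=0.0, activation_fn="gelu",
        layer_norm_first=False, deep_norm=False,
        conv_pos=16, conv_pos_groups=4,
        relative_position_embedding=True, num_buckets=32, max_distance=128,
        gru_rel_pos=True,
    )
    encoder = TransformerEncoder(toy_args)
    x = torch.zeros(2, 10, toy_args.encoder_embed_dim)  # (batch, time, channel)
    out, _ = encoder(x)
    print(out.shape)  # expected: torch.Size([2, 10, 64])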
EXA-1-master
exa/models/unilm-master/beats/backbone.py
# -------------------------------------------------------- # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058) # Github source: https://github.com/microsoft/unilm/tree/master/beats # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Based on VQGAN code bases # https://github.com/CompVis/taming-transformers # --------------------------------------------------------' import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed as distributed try: from einops import rearrange, repeat except ImportError: pass def l2norm(t): return F.normalize(t, p=2, dim=-1) def ema_inplace(moving_avg, new, decay): moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) def sample_vectors(samples, num): num_samples, device = samples.shape[0], samples.device if num_samples >= num: indices = torch.randperm(num_samples, device=device)[:num] else: indices = torch.randint(0, num_samples, (num,), device=device) return samples[indices] def kmeans(samples, num_clusters, num_iters=10, use_cosine_sim=False): dim, dtype, device = samples.shape[-1], samples.dtype, samples.device means = sample_vectors(samples, num_clusters) for _ in range(num_iters): if use_cosine_sim: dists = samples @ means.t() else: diffs = rearrange(samples, 'n d -> n () d') \ - rearrange(means, 'c d -> () c d') dists = -(diffs ** 2).sum(dim=-1) buckets = dists.max(dim=-1).indices bins = torch.bincount(buckets, minlength=num_clusters) zero_mask = bins == 0 bins_min_clamped = bins.masked_fill(zero_mask, 1) new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype) new_means.scatter_add_(0, repeat(buckets, 'n -> n d', d=dim), samples) new_means = new_means / bins_min_clamped[..., None] if use_cosine_sim: new_means = l2norm(new_means) means = torch.where(zero_mask[..., None], means, new_means) return means, bins class EmbeddingEMA(nn.Module): def __init__(self, num_tokens, codebook_dim, decay=0.99, eps=1e-5, kmeans_init=True, codebook_init_path=''): super().__init__() self.num_tokens = num_tokens self.codebook_dim = codebook_dim self.decay = decay self.eps = eps if codebook_init_path == '': if not kmeans_init: weight = torch.randn(num_tokens, codebook_dim) weight = l2norm(weight) else: weight = torch.zeros(num_tokens, codebook_dim) self.register_buffer('initted', torch.Tensor([not kmeans_init])) else: print(f"load init codebook weight from {codebook_init_path}") codebook_ckpt_weight = torch.load(codebook_init_path, map_location='cpu') weight = codebook_ckpt_weight.clone() self.register_buffer('initted', torch.Tensor([True])) self.weight = nn.Parameter(weight, requires_grad=False) self.cluster_size = nn.Parameter(torch.zeros(num_tokens), requires_grad=False) self.embed_avg = nn.Parameter(weight.clone(), requires_grad=False) # self.register_buffer('initted', torch.Tensor([not kmeans_init])) self.update = True @torch.jit.ignore def init_embed_(self, data): if self.initted: return print("Performing Kemans init for codebook") embed, cluster_size = kmeans(data, self.num_tokens, 10, use_cosine_sim=True) self.weight.data.copy_(embed) self.cluster_size.data.copy_(cluster_size) self.initted.data.copy_(torch.Tensor([True])) def forward(self, embed_id): return F.embedding(embed_id, self.weight) def cluster_size_ema_update(self, new_cluster_size): self.cluster_size.data.mul_(self.decay).add_(new_cluster_size, alpha=1 - self.decay) def embed_avg_ema_update(self, new_embed_avg): self.embed_avg.data.mul_(self.decay).add_(new_embed_avg, alpha=1 - self.decay) def 
weight_update(self, num_tokens): n = self.cluster_size.sum() smoothed_cluster_size = ( (self.cluster_size + self.eps) / (n + num_tokens * self.eps) * n ) # normalize embedding average with smoothed cluster size embed_normalized = self.embed_avg / smoothed_cluster_size.unsqueeze(1) # embed_normalized = l2norm(self.embed_avg / smoothed_cluster_size.unsqueeze(1)) self.weight.data.copy_(embed_normalized) def norm_ema_inplace(moving_avg, new, decay): moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) moving_avg.data.copy_(l2norm(moving_avg.data)) class NormEMAVectorQuantizer(nn.Module): def __init__(self, n_embed, embedding_dim, beta, decay=0.99, eps=1e-5, statistic_code_usage=True, kmeans_init=False, codebook_init_path=''): super().__init__() self.codebook_dim = embedding_dim self.num_tokens = n_embed self.beta = beta self.decay = decay # learnable = True if orthogonal_reg_weight > 0 else False self.embedding = EmbeddingEMA(self.num_tokens, self.codebook_dim, decay, eps, kmeans_init, codebook_init_path) self.statistic_code_usage = statistic_code_usage if statistic_code_usage: self.register_buffer('cluster_size', torch.zeros(n_embed)) if distributed.is_available() and distributed.is_initialized(): print("ddp is enable, so use ddp_reduce to sync the statistic_code_usage for each gpu!") self.all_reduce_fn = distributed.all_reduce else: self.all_reduce_fn = nn.Identity() def reset_cluster_size(self, device): if self.statistic_code_usage: self.register_buffer('cluster_size', torch.zeros(self.num_tokens)) self.cluster_size = self.cluster_size.to(device) def forward(self, z): # reshape z -> (batch, height, width, channel) and flatten # z, 'b c h w -> b h w c' # z = rearrange(z, 'b c h w -> b h w c') # z = z.transpose(1, 2) z = l2norm(z) z_flattened = z.reshape(-1, self.codebook_dim) self.embedding.init_embed_(z_flattened) d = z_flattened.pow(2).sum(dim=1, keepdim=True) + \ self.embedding.weight.pow(2).sum(dim=1) - 2 * \ torch.einsum('bd,nd->bn', z_flattened, self.embedding.weight) # 'n d -> d n' encoding_indices = torch.argmin(d, dim=1) z_q = self.embedding(encoding_indices).view(z.shape) encodings = F.one_hot(encoding_indices, self.num_tokens).type(z.dtype) if not self.training: with torch.no_grad(): cluster_size = encodings.sum(0) self.all_reduce_fn(cluster_size) ema_inplace(self.cluster_size, cluster_size, self.decay) if self.training and self.embedding.update: # EMA cluster size bins = encodings.sum(0) self.all_reduce_fn(bins) # self.embedding.cluster_size_ema_update(bins) ema_inplace(self.cluster_size, bins, self.decay) zero_mask = (bins == 0) bins = bins.masked_fill(zero_mask, 1.) embed_sum = z_flattened.t() @ encodings self.all_reduce_fn(embed_sum) embed_normalized = (embed_sum / bins.unsqueeze(0)).t() embed_normalized = l2norm(embed_normalized) embed_normalized = torch.where(zero_mask[..., None], self.embedding.weight, embed_normalized) norm_ema_inplace(self.embedding.weight, embed_normalized, self.decay) # compute loss for embedding loss = self.beta * F.mse_loss(z_q.detach(), z) # preserve gradients z_q = z + (z_q - z).detach() # reshape back to match original input shape # z_q, 'b h w c -> b c h w' # z_q = rearrange(z_q, 'b h w c -> b c h w') # z_q = z_q.transpose(1, 2) return z_q, loss, encoding_indices
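# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# quantizes a toy batch of feature vectors with a small codebook. Sizes are
# placeholders; kmeans_init is disabled here so the sketch does not depend on
# einops being installed (the k-means initializer above uses it).
if __name__ == "__main__":
    quantizer = NormEMAVectorQuantizer(
        n_embed=16, embedding_dim=8, beta=1.0, kmeans_init=False, decay=0.99,
    )
    z = torch.randn(4, 8)  # (num_vectors, codebook_dim)
    z_q, loss, indices = quantizer(z)
    print(z_q.shape, float(loss), indices.shape)  # torch.Size([4, 8]) ... torch.Size([4])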
EXA-1-master
exa/models/unilm-master/beats/quantizer.py
# -------------------------------------------------------- # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058) # Github source: https://github.com/microsoft/unilm/tree/master/beats # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Based on fairseq code bases # https://github.com/pytorch/fairseq # -------------------------------------------------------- import torch import torch.nn as nn from torch.nn import LayerNorm import torchaudio.compliance.kaldi as ta_kaldi from backbone import ( TransformerEncoder, ) import logging from typing import Optional logger = logging.getLogger(__name__) class BEATsConfig: def __init__(self, cfg=None): self.input_patch_size: int = -1 # path size of patch embedding self.embed_dim: int = 512 # patch embedding dimension self.conv_bias: bool = False # include bias in conv encoder self.encoder_layers: int = 12 # num encoder layers in the transformer self.encoder_embed_dim: int = 768 # encoder embedding dimension self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN self.encoder_attention_heads: int = 12 # num encoder attention heads self.activation_fn: str = "gelu" # activation function to use self.layer_wise_gradient_decay_ratio: float = 1.0 # ratio for layer-wise gradient decay self.layer_norm_first: bool = False # apply layernorm first in the transformer self.deep_norm: bool = False # apply deep_norm first in the transformer # dropouts self.dropout: float = 0.1 # dropout probability for the transformer self.attention_dropout: float = 0.1 # dropout probability for attention weights self.activation_dropout: float = 0.0 # dropout probability after activation in FFN self.encoder_layerdrop: float = 0.0 # probability of dropping a tarnsformer layer self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr) # positional embeddings self.conv_pos: int = 128 # number of filters for convolutional positional embeddings self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding # relative position embedding self.relative_position_embedding: bool = False # apply relative position embedding self.num_buckets: int = 320 # number of buckets for relative position embedding self.max_distance: int = 1280 # maximum distance for relative position embedding self.gru_rel_pos: bool = False # apply gated relative position embedding # label predictor self.finetuned_model: bool = False # whether the model is a fine-tuned model. 
self.predictor_dropout: float = 0.1 # dropout probability for the predictor self.predictor_class: int = 527 # target class number for the predictor if cfg is not None: self.update(cfg) def update(self, cfg: dict): self.__dict__.update(cfg) class BEATs(nn.Module): def __init__( self, cfg: BEATsConfig, ) -> None: super().__init__() logger.info(f"BEATs Config: {cfg.__dict__}") self.cfg = cfg self.embed = cfg.embed_dim self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim else None ) self.input_patch_size = cfg.input_patch_size self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size, bias=cfg.conv_bias) self.dropout_input = nn.Dropout(cfg.dropout_input) assert not cfg.deep_norm or not cfg.layer_norm_first self.encoder = TransformerEncoder(cfg) self.layer_norm = LayerNorm(self.embed) if cfg.finetuned_model: self.predictor_dropout = nn.Dropout(cfg.predictor_dropout) self.predictor = nn.Linear(cfg.encoder_embed_dim, cfg.predictor_class) else: self.predictor = None def forward_padding_mask( self, features: torch.Tensor, padding_mask: torch.Tensor, ) -> torch.Tensor: extra = padding_mask.size(1) % features.size(1) if extra > 0: padding_mask = padding_mask[:, :-extra] padding_mask = padding_mask.view( padding_mask.size(0), features.size(1), -1 ) padding_mask = padding_mask.all(-1) return padding_mask def preprocess( self, source: torch.Tensor, fbank_mean: float = 15.41663, fbank_std: float = 6.55582, ) -> torch.Tensor: fbanks = [] for waveform in source: waveform = waveform.unsqueeze(0) * 2 ** 15 fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10) fbanks.append(fbank) fbank = torch.stack(fbanks, dim=0) fbank = (fbank - fbank_mean) / (2 * fbank_std) return fbank def extract_features( self, source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, fbank_mean: float = 15.41663, fbank_std: float = 6.55582, ): fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std) if padding_mask is not None: padding_mask = self.forward_padding_mask(fbank, padding_mask) fbank = fbank.unsqueeze(1) features = self.patch_embedding(fbank) features = features.reshape(features.shape[0], features.shape[1], -1) features = features.transpose(1, 2) features = self.layer_norm(features) if padding_mask is not None: padding_mask = self.forward_padding_mask(features, padding_mask) if self.post_extract_proj is not None: features = self.post_extract_proj(features) x = self.dropout_input(features) x, layer_results = self.encoder( x, padding_mask=padding_mask, ) if self.predictor is not None: x = self.predictor_dropout(x) logits = self.predictor(x) if padding_mask is not None and padding_mask.any(): logits[padding_mask] = 0 logits = logits.sum(dim=1) logits = logits / (~padding_mask).sum(dim=1).unsqueeze(-1).expand_as(logits) else: logits = logits.mean(dim=1) lprobs = torch.sigmoid(logits) return lprobs, padding_mask else: return x, padding_mask
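# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# the checkpoint path is a placeholder, and the 'cfg' / 'model' keys are assumed
# to be present as in the released BEATs checkpoints (this mirrors the loading
# recipe documented in the BEATs README).
if __name__ == "__main__":
    checkpoint = torch.load("/path/to/BEATs_checkpoint.pt")  # placeholder path

    cfg = BEATsConfig(checkpoint["cfg"])
    beats_model = BEATs(cfg)
    beats_model.load_state_dict(checkpoint["model"])
    beats_model.eval()

    audio_input_16khz = torch.randn(1, 10000)  # 16 kHz mono waveform, batch of 1
    padding_mask = torch.zeros(1, 10000).bool()

    # for a pre-trained checkpoint this returns frame-level representations;
    # for a fine-tuned checkpoint it returns label probabilities instead
    features = beats_model.extract_features(audio_input_16khz, padding_mask=padding_mask)[0]
    print(features.shape)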
EXA-1-master
exa/models/unilm-master/beats/BEATs.py
# -------------------------------------------------------- # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058) # Github source: https://github.com/microsoft/unilm/tree/master/beats # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Based on fairseq code bases # https://github.com/pytorch/fairseq # -------------------------------------------------------- import math import warnings import torch from torch import Tensor, nn import torch.nn.functional as F class GradMultiply(torch.autograd.Function): @staticmethod def forward(ctx, x, scale): ctx.scale = scale res = x.new(x) return res @staticmethod def backward(ctx, grad): return grad * ctx.scale, None class SamePad(nn.Module): def __init__(self, kernel_size, causal=False): super().__init__() if causal: self.remove = kernel_size - 1 else: self.remove = 1 if kernel_size % 2 == 0 else 0 def forward(self, x): if self.remove > 0: x = x[:, :, : -self.remove] return x class Swish(nn.Module): def __init__(self): super(Swish, self).__init__() self.act = torch.nn.Sigmoid() def forward(self, x): return x * self.act(x) class GLU_Linear(nn.Module): def __init__(self, input_dim, output_dim, glu_type="sigmoid", bias_in_glu=True): super(GLU_Linear, self).__init__() self.glu_type = glu_type self.output_dim = output_dim if glu_type == "sigmoid": self.glu_act = torch.nn.Sigmoid() elif glu_type == "swish": self.glu_act = Swish() elif glu_type == "relu": self.glu_act = torch.nn.ReLU() elif glu_type == "gelu": self.glu_act = torch.nn.GELU() if bias_in_glu: self.linear = nn.Linear(input_dim, output_dim * 2, True) else: self.linear = nn.Linear(input_dim, output_dim * 2, False) def forward(self, x): # to be consistent with GLU_Linear, we assume the input always has the #channel (#dim) in the last dimension of the tensor, so need to switch the dimension first for 1D-Conv case x = self.linear(x) if self.glu_type == "bilinear": x = (x[:, :, 0:self.output_dim] * x[:, :, self.output_dim:self.output_dim * 2]) else: x = (x[:, :, 0:self.output_dim] * self.glu_act(x[:, :, self.output_dim:self.output_dim * 2])) return x def gelu_accurate(x): if not hasattr(gelu_accurate, "_a"): gelu_accurate._a = math.sqrt(2 / math.pi) return ( 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) ) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x) def get_activation_fn(activation: str): """Returns the activation function corresponding to `activation`""" if activation == "relu": return F.relu elif activation == "gelu": return gelu elif activation == "gelu_fast": warnings.warn( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x elif activation == "glu": return lambda x: x else: raise RuntimeError("--activation-fn {} not supported".format(activation)) def quant_noise(module, p, block_size): """ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - 
For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks """ # if no quantization noise, don't register hook if p <= 0: return module # supported modules assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) # test whether module.weight has the right sizes wrt block_size is_conv = module.weight.ndim == 4 # 2D matrix if not is_conv: assert ( module.weight.size(1) % block_size == 0 ), "Input features must be a multiple of block sizes" # 4D matrix else: # 1x1 convolutions if module.kernel_size == (1, 1): assert ( module.in_channels % block_size == 0 ), "Input channels must be a multiple of block sizes" # regular convolutions else: k = module.kernel_size[0] * module.kernel_size[1] assert k % block_size == 0, "Kernel size must be a multiple of block size" def _forward_pre_hook(mod, input): # no noise for evaluation if mod.training: if not is_conv: # gather weight and sizes weight = mod.weight in_features = weight.size(1) out_features = weight.size(0) # split weight matrix into blocks and randomly drop selected blocks mask = torch.zeros( in_features // block_size * out_features, device=weight.device ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) else: # gather weight and sizes weight = mod.weight in_channels = mod.in_channels out_channels = mod.out_channels # split weight matrix into blocks and randomly drop selected blocks if mod.kernel_size == (1, 1): mask = torch.zeros( int(in_channels // block_size * out_channels), device=weight.device, ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) else: mask = torch.zeros( weight.size(0), weight.size(1), device=weight.device ) mask.bernoulli_(p) mask = ( mask.unsqueeze(2) .unsqueeze(3) .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) ) # scale weights and apply mask mask = mask.to( torch.bool ) # x.bool() is not currently supported in TorchScript s = 1 / (1 - p) mod.weight.data = s * weight.masked_fill(mask, 0) module.register_forward_pre_hook(_forward_pre_hook) return module
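# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# wraps a small linear layer with quantization noise and runs a GLU_Linear block.
# The layer sizes, p and block_size values below are placeholders.
if __name__ == "__main__":
    noisy_linear = quant_noise(nn.Linear(32, 64), p=0.1, block_size=8)
    y = noisy_linear(torch.randn(4, 32))  # in training mode, random weight blocks are zeroed and rescaled
    print(y.shape)  # torch.Size([4, 64])

    glu = GLU_Linear(input_dim=16, output_dim=32, glu_type="swish")
    print(glu(torch.randn(2, 5, 16)).shape)  # torch.Size([2, 5, 32])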
EXA-1-master
exa/models/unilm-master/beats/modules.py
# -------------------------------------------------------- # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058) # Github source: https://github.com/microsoft/unilm/tree/master/beats # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Based on fairseq code bases # https://github.com/pytorch/fairseq # -------------------------------------------------------- import torch import torch.nn as nn from torch.nn import LayerNorm import torchaudio.compliance.kaldi as ta_kaldi from backbone import ( TransformerEncoder, ) from quantizer import ( NormEMAVectorQuantizer, ) import logging from typing import Optional logger = logging.getLogger(__name__) class TokenizersConfig: def __init__(self, cfg=None): self.input_patch_size: int = -1 # path size of patch embedding self.embed_dim: int = 512 # patch embedding dimension self.conv_bias: bool = False # include bias in conv encoder self.encoder_layers: int = 12 # num encoder layers in the transformer self.encoder_embed_dim: int = 768 # encoder embedding dimension self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN self.encoder_attention_heads: int = 12 # num encoder attention heads self.activation_fn: str = "gelu" # activation function to use self.layer_norm_first: bool = False # apply layernorm first in the transformer self.deep_norm: bool = False # apply deep_norm first in the transformer # dropouts self.dropout: float = 0.1 # dropout probability for the transformer self.attention_dropout: float = 0.1 # dropout probability for attention weights self.activation_dropout: float = 0.0 # dropout probability after activation in FFN self.encoder_layerdrop: float = 0.0 # probability of dropping a tarnsformer layer self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr) # positional embeddings self.conv_pos: int = 128 # number of filters for convolutional positional embeddings self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding # relative position embedding self.relative_position_embedding: bool = False # apply relative position embedding self.num_buckets: int = 320 # number of buckets for relative position embedding self.max_distance: int = 1280 # maximum distance for relative position embedding self.gru_rel_pos: bool = False # apply gated relative position embedding # quantizer self.quant_n: int = 1024 # codebook number in quantizer self.quant_dim: int = 256 # codebook dimension in quantizer if cfg is not None: self.update(cfg) def update(self, cfg: dict): self.__dict__.update(cfg) class Tokenizers(nn.Module): def __init__( self, cfg: TokenizersConfig, ) -> None: super().__init__() logger.info(f"Tokenizers Config: {cfg.__dict__}") self.cfg = cfg self.embed = cfg.embed_dim self.post_extract_proj = ( nn.Linear(self.embed, cfg.encoder_embed_dim) if self.embed != cfg.encoder_embed_dim else None ) self.input_patch_size = cfg.input_patch_size self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size, bias=cfg.conv_bias) self.dropout_input = nn.Dropout(cfg.dropout_input) assert not cfg.deep_norm or not cfg.layer_norm_first self.encoder = TransformerEncoder(cfg) self.layer_norm = LayerNorm(self.embed) self.quantize = NormEMAVectorQuantizer( n_embed=cfg.quant_n, embedding_dim=cfg.quant_dim, beta=1.0, kmeans_init=True, decay=0.99, ) self.quant_n = cfg.quant_n self.quantize_layer = nn.Sequential( nn.Linear(cfg.encoder_embed_dim, cfg.encoder_embed_dim), 
nn.Tanh(), nn.Linear(cfg.encoder_embed_dim, cfg.quant_dim) # for quantize ) def forward_padding_mask( self, features: torch.Tensor, padding_mask: torch.Tensor, ) -> torch.Tensor: extra = padding_mask.size(1) % features.size(1) if extra > 0: padding_mask = padding_mask[:, :-extra] padding_mask = padding_mask.view( padding_mask.size(0), features.size(1), -1 ) padding_mask = padding_mask.all(-1) return padding_mask def preprocess( self, source: torch.Tensor, fbank_mean: float = 15.41663, fbank_std: float = 6.55582, ) -> torch.Tensor: fbanks = [] for waveform in source: waveform = waveform.unsqueeze(0) * 2 ** 15 fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10) fbanks.append(fbank) fbank = torch.stack(fbanks, dim=0) fbank = (fbank - fbank_mean) / (2 * fbank_std) return fbank def extract_labels( self, source: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, fbank_mean: float = 15.41663, fbank_std: float = 6.55582, ): fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std) if padding_mask is not None: padding_mask = self.forward_padding_mask(fbank, padding_mask) fbank = fbank.unsqueeze(1) features = self.patch_embedding(fbank) features = features.reshape(features.shape[0], features.shape[1], -1) features = features.transpose(1, 2) features = self.layer_norm(features) if padding_mask is not None: padding_mask = self.forward_padding_mask(features, padding_mask) if self.post_extract_proj is not None: features = self.post_extract_proj(features) x = self.dropout_input(features) x, layer_results = self.encoder( x, padding_mask=padding_mask, ) quantize_input = self.quantize_layer(x) quantize_feature, embed_loss, embed_ind = self.quantize(quantize_input) return embed_ind
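# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# the checkpoint path is a placeholder, and the 'cfg' / 'model' keys are assumed
# to be present as in the released BEATs tokenizer checkpoints.
if __name__ == "__main__":
    checkpoint = torch.load("/path/to/Tokenizer_checkpoint.pt")  # placeholder path

    cfg = TokenizersConfig(checkpoint["cfg"])
    beats_tokenizer = Tokenizers(cfg)
    beats_tokenizer.load_state_dict(checkpoint["model"])
    beats_tokenizer.eval()

    audio_input_16khz = torch.randn(1, 10000)  # 16 kHz mono waveform, batch of 1
    padding_mask = torch.zeros(1, 10000).bool()

    labels = beats_tokenizer.extract_labels(audio_input_16khz, padding_mask=padding_mask)
    print(labels.shape)  # one discrete codebook index per patch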
EXA-1-master
exa/models/unilm-master/beats/Tokenizers.py
import os import sys import time import logging from tqdm import tqdm import torch from fairseq import utils, tasks, options from fairseq.checkpoint_utils import load_model_ensemble_and_task from fairseq.dataclass.utils import convert_namespace_to_omegaconf from torch import Tensor from typing import Dict, List, Optional logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("inference") def write_result(results, output_file): with open(output_file, 'w') as f: for line in results: f.write(line + '\n') @torch.no_grad() def fairseq_generate(data_lines, cfg, models, task, batch_size, device): # fairseq original decoding implementation src_dict = task.source_dictionary tgt_dict = task.target_dictionary generator = task.build_generator(models, cfg.generation) data_size = len(data_lines) all_results = [] logger.info(f'Fairseq generate batch {batch_size}') start = time.perf_counter() for start_idx in tqdm(range(0, data_size, batch_size)): batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) translations = generator.generate(models, batch, prefix_tokens=None) results = [] for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos]) delta = time.perf_counter() - start remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta @torch.no_grad() def baseline_forward_decoder(model, input_tokens, encoder_out: Dict[str, List[Tensor]], incremental_state: Dict[str, Dict[str, Optional[Tensor]]], parallel_forward_start_pos=None, temperature: float = 1.0): decoder_out = model.decoder.forward(input_tokens, encoder_out=encoder_out, incremental_state=incremental_state, parallel_forward_start_pos=parallel_forward_start_pos) decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1]) pred_tokens = torch.argmax(decoder_out_tuple[0], dim=-1).squeeze(0) return pred_tokens @torch.no_grad() def baseline_generate(data_lines, model, task, device, max_len=200): # simplified AR greedy decoding src_dict = task.source_dictionary tgt_dict = task.target_dictionary data_size = len(data_lines) all_results = [] logger.info(f'Baseline generate') start = time.perf_counter() for start_idx in tqdm(range(0, data_size)): bpe_line = data_lines[start_idx] src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long() net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device), 'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)} encoder_out = model.encoder.forward_torchscript(net_input) incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})) tokens = [tgt_dict.eos()] for step in range(0, max_len): cur_input_tokens = torch.tensor([tokens]).to(device).long() pred_token = baseline_forward_decoder(model, cur_input_tokens, encoder_out, 
incremental_state).item() if pred_token == tgt_dict.eos(): break else: tokens.append(pred_token) all_results.append(tgt_dict.string(tokens[1:])) delta = time.perf_counter() - start remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta def cut_incremental_state(incremental_state, keep_len, encoder_state_ids): for n in incremental_state: if n[: n.index('.')] in encoder_state_ids: continue for k in incremental_state[n]: if incremental_state[n][k] is not None: if incremental_state[n][k].dim() == 4: incremental_state[n][k] = incremental_state[n][k][:, :, :keep_len] elif incremental_state[n][k].dim() == 2: incremental_state[n][k] = incremental_state[n][k][:, :keep_len] @torch.no_grad() def forward_decoder(model, input_tokens, encoder_out: Dict[str, List[Tensor]], incremental_state: Dict[str, Dict[str, Optional[Tensor]]], parallel_forward_start_pos=None, temperature: float = 1.0, beta: int = 1, tau: float = 0.0): decoder_out = model.decoder.forward(input_tokens, encoder_out=encoder_out, incremental_state=incremental_state, parallel_forward_start_pos=parallel_forward_start_pos) decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1]) topk_scores, indexes = torch.topk(decoder_out_tuple[0], beta, dim=-1) topk_scores = topk_scores[0].tolist() indexes = indexes[0].tolist() for i in range(len(topk_scores)): for j, s in enumerate(topk_scores[i]): if topk_scores[i][0] - s > tau: indexes[i][j] = -1 return indexes def gad_generate(data_lines, model, AR_model, task, block_size, device, beta=1, tau=0, max_len=200): # Generalized Aggressive Decoding src_dict = task.source_dictionary tgt_dict = task.target_dictionary encoder_state_ids = [] for i in range(len(AR_model.decoder.layers)): encoder_state_ids.append(AR_model.decoder.layers[i].encoder_attn._incremental_state_id) data_size = len(data_lines) all_results = [] logger.info(f'GAD generate') pass_tokens = [0] * max_len sent_nums = [0] * max_len start = time.perf_counter() for start_idx in tqdm(range(0, data_size)): bpe_line = data_lines[start_idx] src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long() net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device), 'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)} AR_encoder_out = AR_model.encoder.forward_torchscript(net_input) encoder_out = model.encoder.forward_torchscript(net_input) incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})) prev_output_tokens = [tgt_dict.unk()] * block_size start_pos = 0 for step in range(0, max_len): start_pos, prev_output_tokens, pass_token = gad_forward(incremental_state, encoder_state_ids, start_pos, block_size, tgt_dict, prev_output_tokens, encoder_out, AR_encoder_out, model, AR_model, beta, tau) pass_tokens[step] += pass_token sent_nums[step] += 1 if start_pos == -1: break all_results.append(tgt_dict.string(prev_output_tokens)) total_pass_tokens = 0 total_sent_nums = 0 for step in range(max_len): if sent_nums[step] > 0: total_pass_tokens += pass_tokens[step] total_sent_nums += sent_nums[step] print("Avg accepted tokens:", total_pass_tokens / total_sent_nums) total_iter = 0 for step in range(max_len): if sent_nums[step - 1] > 0: if step == 0: last_num = data_size else: last_num = sent_nums[step - 1] if (last_num - sent_nums[step]) > 0: total_iter += (last_num - sent_nums[step]) * (step) print("Avg decoding iteration:", total_iter / data_size) delta = time.perf_counter() - start 
remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta def gad_forward(incremental_state, encoder_state_ids, start_pos, block_size, tgt_dict, prev_output_tokens, encoder_out, AR_encoder_out, model, AR_model, beta, tau, max_len=200): output_tokens = torch.tensor([prev_output_tokens]).to(device) _scores, _tokens = model.decoder( normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out, ).max(-1) prev_output_tokens[start_pos:start_pos + block_size] = _tokens[0].tolist()[start_pos:start_pos + block_size] cut_incremental_state(incremental_state, keep_len=start_pos, encoder_state_ids=encoder_state_ids) cur_span_input_tokens = torch.tensor([[tgt_dict.eos()] + prev_output_tokens]).to(device) AR_topk_tokens = forward_decoder(AR_model, cur_span_input_tokens, AR_encoder_out, incremental_state, parallel_forward_start_pos=start_pos, beta=beta, tau=tau) bifurcation = block_size for i, (token, AR_topk_token) in enumerate(zip(prev_output_tokens[start_pos:], AR_topk_tokens[:-1][:])): if token not in AR_topk_token: bifurcation = i break next_output_tokens = prev_output_tokens[:start_pos + bifurcation] + [AR_topk_tokens[bifurcation][0]] + [ tgt_dict.unk()] * block_size pass_token = 0 find_eos = False for i, o in enumerate(next_output_tokens[start_pos:start_pos + bifurcation + 1]): if o == tgt_dict.eos() or i + start_pos == max_len: next_output_tokens = next_output_tokens[0:start_pos + i] start_pos = -1 pass_token = i find_eos = True break if not find_eos: start_pos = start_pos + bifurcation + 1 pass_token = bifurcation + 1 return start_pos, next_output_tokens, pass_token if __name__ == '__main__': parser = options.get_generation_parser() parser.add_argument('--input-path', type=str, required=True, help='path to eval file') parser.add_argument('--output-path', type=str, default=None, help='path to output file') parser.add_argument('--AR-path', type=str, default=None, help='path to AR model') parser.add_argument('--strategy', type=str, default='fairseq', help='decoding strategy, choose from: fairseq, AR, gad') parser.add_argument('--batch', type=int, default=None, help='batch size') parser.add_argument('--block-size', type=int, default=5, help='block size') parser.add_argument('--beta', type=int, default=1, help='top-beta hyperparameter') parser.add_argument('--tau', type=float, default=0, help='tolerance hyperparameter') cmd_args = options.parse_args_and_arch(parser) cmd_args.input_path = os.path.expanduser(cmd_args.input_path) cmd_args.output_path = os.path.expanduser(cmd_args.output_path) cfg = convert_namespace_to_omegaconf(cmd_args) task = tasks.setup_task(cfg.task) # NAR drafter logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _model_args, _model_task = load_model_ensemble_and_task(filenames=[cfg.common_eval.path], task=task) if cmd_args.cpu: device = torch.device('cpu') else: device = torch.device('cuda') model = models[0].to(device).eval() # AR verifier AR_model = None AR_models = None _AR_model_task = None if cmd_args.AR_path is not None: AR_models, _AR_model_args, _AR_model_task = load_model_ensemble_and_task(filenames=[cmd_args.AR_path], arg_overrides={'data': cfg.task.data}) AR_model = AR_models[0].to(device).eval() logging.info("AR model loaded!") with open(cmd_args.input_path, 'r') as f: bpe_sents = [l.strip() for l in f.readlines()] if cmd_args.strategy == 'AR': logger.info("Decoding Strategy: Simplified AR") remove_bpe_results, delta = baseline_generate(bpe_sents, AR_model, _AR_model_task, device) 
logger.info(f'Simplified AR generate: {delta}') elif cmd_args.strategy == 'gad': logger.info("Decoding Strategy: GAD") remove_bpe_results, delta = gad_generate(bpe_sents, model, AR_model, task, cmd_args.block_size, device, beta=cmd_args.beta, tau=cmd_args.tau) logger.info(f'GAD generate: {delta}') else: logger.info("Decoding Strategy: fairseq") remove_bpe_results, delta = fairseq_generate(bpe_sents, cfg, AR_models, _AR_model_task, cmd_args.batch, device) logger.info(f'Fairseq generate batch {cmd_args.batch}, beam {cfg.generation.beam}: {delta}') if cmd_args.output_path is not None: write_result(remove_bpe_results, cmd_args.output_path)
EXA-1-master
exa/models/unilm-master/decoding/GAD/inference_paper.py
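The verification rule used by the GAD script above is worth a standalone illustration. Below is a minimal, self-contained sketch (not part of the repository; the helper name and tensor values are made up) of the top-beta / tolerance-tau acceptance implemented in forward_decoder: the AR verifier keeps up to beta candidates per position and prunes any candidate whose score falls more than tau below the best one, marking it with -1 just as the script does.

import torch

def top_beta_candidates(logits: torch.Tensor, beta: int = 2, tau: float = 1.0):
    # logits: (seq_len, vocab_size) decoder outputs for one sentence
    topk_scores, indexes = torch.topk(logits, beta, dim=-1)
    scores = topk_scores.tolist()
    cands = indexes.tolist()
    for i in range(len(scores)):
        for j, s in enumerate(scores[i]):
            if scores[i][0] - s > tau:
                cands[i][j] = -1  # pruned: too far below the best candidate
    return cands

if __name__ == "__main__":
    dummy = torch.tensor([[2.0, 1.5, 0.1], [3.0, 0.5, 0.4]])  # 2 positions, vocab of 3
    print(top_beta_candidates(dummy, beta=2, tau=1.0))  # -> [[0, 1], [0, -1]]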
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import subprocess import sys from setuptools import setup, find_packages, Extension from setuptools import Extension, find_packages, setup if sys.version_info < (3, 6): sys.exit("Sorry, Python >= 3.6 is required for fairseq.") def write_version_py(): with open(os.path.join("fairseq", "version.txt")) as f: version = f.read().strip() # append latest commit hash to version string try: sha = ( subprocess.check_output(["git", "rev-parse", "HEAD"]) .decode("ascii") .strip() ) version += "+" + sha[:7] except Exception: pass # write version info to fairseq/version.py with open(os.path.join("fairseq", "version.py"), "w") as f: f.write('__version__ = "{}"\n'.format(version)) return version version = write_version_py() with open("readme.md") as f: readme = f.read() if sys.platform == "darwin": extra_compile_args = ["-stdlib=libc++", "-O3"] else: extra_compile_args = ["-std=c++11", "-O3"] class NumpyExtension(Extension): """Source: https://stackoverflow.com/a/54128391""" def __init__(self, *args, **kwargs): self.__include_dirs = [] super().__init__(*args, **kwargs) @property def include_dirs(self): import numpy return self.__include_dirs + [numpy.get_include()] @include_dirs.setter def include_dirs(self, dirs): self.__include_dirs = dirs extensions = [ Extension( "fairseq.libbleu", sources=[ "fairseq/clib/libbleu/libbleu.cpp", "fairseq/clib/libbleu/module.cpp", ], extra_compile_args=extra_compile_args, ), NumpyExtension( "fairseq.data.data_utils_fast", sources=["fairseq/data/data_utils_fast.pyx"], language="c++", extra_compile_args=extra_compile_args, ), NumpyExtension( "fairseq.data.token_block_utils_fast", sources=["fairseq/data/token_block_utils_fast.pyx"], language="c++", extra_compile_args=extra_compile_args, ), ] cmdclass = {} try: # torch is not available when generating docs from torch.utils import cpp_extension extensions.extend( [ cpp_extension.CppExtension( "fairseq.libnat", sources=[ "fairseq/clib/libnat/edit_dist.cpp", ], ) ] ) if "CUDA_HOME" in os.environ: extensions.extend( [ cpp_extension.CppExtension( "fairseq.libnat_cuda", sources=[ "fairseq/clib/libnat_cuda/edit_dist.cu", "fairseq/clib/libnat_cuda/binding.cpp", ], ), cpp_extension.CppExtension( "fairseq.ngram_repeat_block_cuda", sources=[ "fairseq/clib/cuda/ngram_repeat_block_cuda.cpp", "fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu", ], ), ] ) cmdclass["build_ext"] = cpp_extension.BuildExtension except ImportError: pass if "READTHEDOCS" in os.environ: # don't build extensions when generating docs extensions = [] if "build_ext" in cmdclass: del cmdclass["build_ext"] # use CPU build of PyTorch dependency_links = [ "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl" ] else: dependency_links = [] if "clean" in sys.argv[1:]: # Source: https://bit.ly/2NLVsgE print("deleting Cython files...") import subprocess subprocess.run( ["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"], shell=True, ) extra_packages = [] if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")): extra_packages.append("fairseq.model_parallel.megatron.mpu") def do_setup(package_data): setup( name="fairseq", version=version, description="Facebook AI Research Sequence-to-Sequence Toolkit", url="https://github.com/pytorch/fairseq", classifiers=[ "Intended Audience :: Science/Research", 
"License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], long_description=readme, long_description_content_type="text/markdown", setup_requires=[ "cython", 'numpy<1.20.0; python_version<"3.7"', 'numpy; python_version>="3.7"', "setuptools>=18.0", ], install_requires=[ "cffi", "cython", 'dataclasses; python_version<"3.7"', "hydra-core<1.1", "omegaconf<2.1", 'numpy<1.20.0; python_version<"3.7"', 'numpy; python_version>="3.7"', "regex", "sacrebleu>=1.4.12", "torch", "tqdm", ], dependency_links=dependency_links, packages=find_packages( exclude=[ "examples", "examples.*", "scripts", "scripts.*", "tests", "tests.*", ] ) + extra_packages, package_data=package_data, ext_modules=extensions, test_suite="tests", entry_points={ "console_scripts": [ "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main", "fairseq-generate = fairseq_cli.generate:cli_main", "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main", "fairseq-interactive = fairseq_cli.interactive:cli_main", "fairseq-preprocess = fairseq_cli.preprocess:cli_main", "fairseq-score = fairseq_cli.score:cli_main", "fairseq-train = fairseq_cli.train:cli_main", "fairseq-validate = fairseq_cli.validate:cli_main", ], }, cmdclass=cmdclass, zip_safe=False, ) def get_files(path, relative_to="fairseq"): all_files = [] for root, _dirs, files in os.walk(path, followlinks=True): root = os.path.relpath(root, relative_to) for file in files: if file.endswith(".pyc"): continue all_files.append(os.path.join(root, file)) return all_files if __name__ == "__main__": try: # symlink examples into fairseq package so package_data accepts them fairseq_examples = os.path.join("fairseq", "examples") if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples): os.symlink(os.path.join("..", "examples"), fairseq_examples) package_data = { "fairseq": ( get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config")) ) } do_setup(package_data) finally: if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples): os.unlink(fairseq_examples)
EXA-1-master
exa/models/unilm-master/decoding/GAD/setup.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""

from fairseq_cli.train import cli_main


if __name__ == "__main__":
    cli_main()
EXA-1-master
exa/models/unilm-master/decoding/GAD/train.py
import os import sys import time import logging from tqdm import tqdm import torch from fairseq import utils, tasks, options from fairseq.checkpoint_utils import load_model_ensemble_and_task from fairseq.dataclass.utils import convert_namespace_to_omegaconf from torch import Tensor from typing import Dict, List, Optional logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("inference") def write_result(results, output_file): with open(output_file, 'w') as f: for line in results: f.write(line + '\n') @torch.no_grad() def fairseq_generate(data_lines, cfg, models, task, batch_size, device): # fairseq original decoding implementation src_dict = task.source_dictionary tgt_dict = task.target_dictionary generator = task.build_generator(models, cfg.generation) data_size = len(data_lines) all_results = [] logger.info(f'Fairseq generate batch {batch_size}') start = time.perf_counter() for start_idx in tqdm(range(0, data_size, batch_size)): batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) translations = generator.generate(models, batch, prefix_tokens=None) results = [] for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos]) delta = time.perf_counter() - start remove_bpe_results = [line.replace('@@ ', '') for line in all_results] return remove_bpe_results, delta @torch.no_grad() def baseline_forward_decoder(model, input_tokens, encoder_out: Dict[str, List[Tensor]], incremental_state: Dict[str, Dict[str, Optional[Tensor]]], parallel_forward_start_pos=None, temperature: float = 1.0): decoder_out = model.decoder.forward(input_tokens, encoder_out=encoder_out, incremental_state=incremental_state, parallel_forward_start_pos=parallel_forward_start_pos) decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1]) pred_tokens = torch.argmax(decoder_out_tuple[0], dim=-1).squeeze(0) return pred_tokens @torch.no_grad() def baseline_generate(data_lines, model, task, batch_size, device, max_len=200): # simplified AR greedy decoding src_dict = task.source_dictionary tgt_dict = task.target_dictionary data_size = len(data_lines) all_results = [] start = time.perf_counter() logger.info(f'Baseline generate') for start_idx in tqdm(range(0, data_size, batch_size)): batch_size = min(data_size - start_idx, batch_size) batch_lines = [line for line in data_lines[start_idx: start_idx + batch_size]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch_dataset.left_pad_source = False batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) net_input = batch['net_input'] encoder_out = model.encoder.forward(net_input['src_tokens'], 
net_input['src_lengths']) incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})) batch_tokens = [[tgt_dict.eos()] for _ in range(batch_size)] finish_list = [] for step in range(0, max_len): cur_input_tokens = torch.tensor(batch_tokens).to(device).long() pred_tokens = baseline_forward_decoder(model, cur_input_tokens, encoder_out, incremental_state=incremental_state) for i, pred_tok in enumerate(pred_tokens): if len(batch_tokens[i]) == 1: batch_tokens[i].append(pred_tok.item()) else: if batch_tokens[i][-1] != tgt_dict.eos(): batch_tokens[i].append(pred_tok.item()) else: if i not in finish_list: finish_list.append(i) batch_tokens[i].append(tgt_dict.eos()) if len(finish_list) == batch_size: break batch_tokens = [y for x, y in sorted(zip(batch['id'].cpu().tolist(), batch_tokens))] for tokens in batch_tokens: all_results.append(tgt_dict.string(tokens[1:])) remove_bpe_results = [line.replace('@@ ', '') for line in all_results] delta = time.perf_counter() - start return remove_bpe_results, delta @torch.no_grad() def forward_decoder(model, input_tokens, encoder_out, incremental_state=None, parallel_forward_start_pos=None, temperature=1.0, beta=1, tau=0.0): decoder_out = model.decoder.forward(input_tokens, encoder_out=encoder_out, incremental_state=incremental_state, parallel_forward_start_pos=parallel_forward_start_pos) decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1]) topk_scores, indexes = torch.topk(decoder_out_tuple[0], beta, dim=-1) topk_scores_list = topk_scores.tolist() indexes_list = indexes.tolist() for i in range(indexes.size(0)): for j in range(indexes.size(1)): for k, s in enumerate(topk_scores_list[i][j]): if topk_scores_list[i][j][0] - s > tau: indexes_list[i][j][k] = -1 return indexes_list def gad_generate(data_lines, model, AR_model, task, block_size, batch_size, device, beta=1, tau=0, max_len=200): # Generalized Aggressive Decoding src_dict = task.source_dictionary tgt_dict = task.target_dictionary data_size = len(data_lines) all_results = [] logger.info(f'GAD generate') start = time.perf_counter() for start_idx in tqdm(range(0, data_size, batch_size)): batch_size = min(data_size - start_idx, batch_size) batch_lines = [line for line in data_lines[start_idx: start_idx + batch_size]] batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines] lengths = torch.LongTensor([t.numel() for t in batch_ids]) batch_dataset = task.build_dataset_for_inference(batch_ids, lengths) batch_dataset.left_pad_source = False batch = batch_dataset.collater(batch_dataset) batch = utils.apply_to_sample(lambda t: t.to(device), batch) net_input = batch['net_input'] AR_encoder_out = AR_model.encoder.forward(net_input['src_tokens'], net_input['src_lengths']) encoder_out = model.encoder.forward(net_input['src_tokens'], net_input['src_lengths']) sentences = [[tgt_dict.eos()] for _ in range(batch_size)] prev_output_tokens = [[tgt_dict.unk()] * block_size for _ in range(batch_size)] start_pos_list = [0] * batch_size finish_list = [] for step in range(0, max_len): prev_output_tokens, start_pos_list = gad_forward(start_pos_list, block_size, batch_size, tgt_dict, prev_output_tokens, encoder_out, AR_encoder_out, model, AR_model, beta, tau) for i, start_pos in enumerate(start_pos_list): if i not in finish_list: if start_pos == -1: finish_list.append(i) sentences[i] = prev_output_tokens[i] if len(finish_list) == batch_size: break batch_sents = [y for x, y in 
sorted(zip(batch['id'].cpu().tolist(), sentences))] for s in batch_sents: all_results.append(tgt_dict.string(s)) remove_bpe_results = [line.replace('@@ ', '') for line in all_results] delta = time.perf_counter() - start return remove_bpe_results, delta def gad_forward(start_pos_list, block_size, batch_size, tgt_dict, prev_output_tokens, encoder_out, AR_encoder_out, model, AR_model, beta, tau, max_len=200): pad_tokens = [[tgt_dict.pad()] * (max_len + block_size) for _ in range(batch_size)] for i in range(batch_size): pad_tokens[i][:len(prev_output_tokens[i])] = prev_output_tokens[i] output_tokens = torch.tensor(pad_tokens).to(device) output_tokens = output_tokens[:, : output_tokens.ne(tgt_dict.pad()).sum(1).max()] _, tensor_tokens = model.decoder( normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out, ).max(-1) _tokens = tensor_tokens.tolist() for i, start_pos in enumerate(start_pos_list): if start_pos_list[i] != -1: output_tokens[i, start_pos:start_pos + block_size] = tensor_tokens[i, start_pos:start_pos + block_size] prev_output_tokens[i][start_pos:start_pos + block_size] = _tokens[i][start_pos:start_pos + block_size] append_eos = torch.tensor([[tgt_dict.eos()] for _ in range(batch_size)]).to(device) cur_span_input_tokens = torch.cat((append_eos, output_tokens), dim=-1) AR_verify_tokens = forward_decoder(AR_model, cur_span_input_tokens, AR_encoder_out, beta=beta, tau=tau) next_output_tokens = prev_output_tokens.copy() for i in range(batch_size): if start_pos_list[i] != -1: bifurcation = block_size for j, (token, AR_verify_token) in enumerate( zip(prev_output_tokens[i][start_pos_list[i]:], AR_verify_tokens[i][start_pos_list[i]:-1])): if token not in AR_verify_token: bifurcation = j break next_output_tokens[i] = prev_output_tokens[i][:start_pos_list[i] + bifurcation] + \ [AR_verify_tokens[i][start_pos_list[i] + bifurcation][0]] + \ [tgt_dict.unk()] * block_size find_eos = False for j, o in enumerate(next_output_tokens[i][start_pos_list[i]:start_pos_list[i] + bifurcation + 1]): if o == tgt_dict.eos() or start_pos_list[i] + j == max_len: next_output_tokens[i] = next_output_tokens[i][:start_pos_list[i] + j] start_pos_list[i] = -1 find_eos = True break if not find_eos: start_pos_list[i] = start_pos_list[i] + bifurcation + 1 return next_output_tokens, start_pos_list if __name__ == '__main__': parser = options.get_generation_parser() parser.add_argument('--input-path', type=str, required=True, help='path to eval file') parser.add_argument('--output-path', type=str, default=None, help='path to output file') parser.add_argument('--AR-path', type=str, default=None, help='path to AR model') parser.add_argument('--strategy', type=str, default='fairseq', help='decoding strategy, choose from: fairseq, AR, gad') parser.add_argument('--batch', type=int, default=None, help='batch size') parser.add_argument('--block-size', type=int, default=5, help='block size') parser.add_argument('--beta', type=int, default=1, help='top-beta hyperparameter') parser.add_argument('--tau', type=float, default=0, help='tolerance hyperparameter') cmd_args = options.parse_args_and_arch(parser) cmd_args.input_path = os.path.expanduser(cmd_args.input_path) cmd_args.output_path = os.path.expanduser(cmd_args.output_path) cfg = convert_namespace_to_omegaconf(cmd_args) task = tasks.setup_task(cfg.task) # NAR drafter logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _model_args, _model_task = load_model_ensemble_and_task(filenames=[cfg.common_eval.path], task=task) if cmd_args.cpu: 
device = torch.device('cpu') else: device = torch.device('cuda') model = models[0].to(device).eval() if cfg.common.fp16: logging.info("NAR fp16 enabled!") model.half() # AR verifier AR_model = None AR_models = None _AR_model_task = None if cmd_args.AR_path is not None: AR_models, _AR_model_args, _AR_model_task = load_model_ensemble_and_task(filenames=[cmd_args.AR_path], arg_overrides={'data': cfg.task.data}) if cfg.common.fp16: logging.info("AR fp16 enabled!") for AR_model in AR_models: AR_model.half() AR_model = AR_models[0].to(device).eval() logging.info("AR model loaded!") with open(cmd_args.input_path, 'r') as f: bpe_sents = [l.strip() for l in f.readlines()] if cmd_args.strategy == 'AR': logger.info("Decoding Strategy: Simplified AR") remove_bpe_results, delta = baseline_generate(bpe_sents, AR_model, _AR_model_task, cmd_args.batch, device) logger.info(f'Simplified AR generate: {delta}') elif cmd_args.strategy == 'gad': logger.info("Decoding Strategy: GAD") remove_bpe_results, delta = gad_generate(bpe_sents, model, AR_model, task, cmd_args.block_size, cmd_args.batch, device, beta=cmd_args.beta, tau=cmd_args.tau) logger.info(f'GAD generate: {delta}') else: logger.info("Decoding Strategy: fairseq") remove_bpe_results, delta = fairseq_generate(bpe_sents, cfg, AR_models, _AR_model_task, cmd_args.batch, device) logger.info(f'Fairseq generate batch {cmd_args.batch}, beam {cfg.generation.beam}: {delta}') if cmd_args.output_path is not None: write_result(remove_bpe_results, cmd_args.output_path)
EXA-1-master
exa/models/unilm-master/decoding/GAD/inference.py
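As a companion to the batched gad_forward above, here is a simplified, list-based sketch (illustrative only; the function and argument names are ad hoc) of the draft-then-verify bifurcation: the drafted block is accepted up to the first position whose token is not among the verifier's surviving candidates, the verifier's own top-1 token is appended at that position, and the remainder of the block is reset to <unk> for the next parallel drafting pass.

def accept_draft(draft_block, verifier_topk, unk):
    # draft_block: block_size drafted token ids
    # verifier_topk: block_size + 1 candidate lists from the AR verifier
    #                (the extra entry covers the position right after the block)
    bifurcation = len(draft_block)
    for i, (tok, cands) in enumerate(zip(draft_block, verifier_topk)):
        if tok not in cands:
            bifurcation = i
            break
    # verified prefix plus the verifier's top-1 token at the first mismatch
    accepted = draft_block[:bifurcation] + [verifier_topk[bifurcation][0]]
    next_block = accepted + [unk] * len(draft_block)  # to be re-drafted in parallel
    return accepted, next_block

# accept_draft([5, 7, 9], [[5, 2], [7, -1], [4, 8], [3]], unk=3)
# -> ([5, 7, 4], [5, 7, 4, 3, 3, 3])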
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

import functools
import importlib


dependencies = [
    "dataclasses",
    "hydra",
    "numpy",
    "omegaconf",
    "regex",
    "requests",
    "torch",
]


# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
    try:
        importlib.import_module(dep)
    except ImportError:
        # Hack: the hydra package is provided under the "hydra-core" name in
        # pypi. We don't want the user mistakenly calling `pip install hydra`
        # since that will install an unrelated package.
        if dep == "hydra":
            dep = "hydra-core"
        missing_deps.append(dep)
if len(missing_deps) > 0:
    raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))


# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import (  # noqa; noqa
    BPEHubInterface as bpe,
    TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY  # noqa


# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
    import fairseq.data.token_block_utils_fast  # noqa
except ImportError:
    try:
        import cython  # noqa
        import os
        from setuptools import sandbox

        sandbox.run_setup(
            os.path.join(os.path.dirname(__file__), "setup.py"),
            ["build_ext", "--inplace"],
        )
    except ImportError:
        print(
            "Unable to build Cython components. Please make sure Cython is "
            "installed if the torch.hub model you are loading depends on it."
        )


# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
    for model_name in _cls.hub_models().keys():
        globals()[model_name] = functools.partial(
            _cls.from_pretrained,
            model_name,
        )
EXA-1-master
exa/models/unilm-master/decoding/GAD/hubconf.py
from .criterions import *
from .models import *
from .tasks import *

print("GAD plugins loaded...")
EXA-1-master
exa/models/unilm-master/decoding/GAD/block_plugins/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from math import log import torch from fairseq import utils from fairseq.data import LanguagePairDataset from fairseq.dataclass import ChoiceEnum from fairseq.tasks import register_task from fairseq.tasks.translation import TranslationConfig, TranslationTask, load_langpair_dataset from fairseq.utils import new_arange import logging from omegaconf import II import numpy as np NOISE_CHOICES = ChoiceEnum(["random_delete", "random_mask", "no_noise", "full_mask", "block_mask"]) @dataclass class TranslationLevenshteinConfig(TranslationConfig): noise: NOISE_CHOICES = field( default="random_delete", metadata={ "help": "type of noise" }, ) start_p: float = field( default=0.5, metadata={"help": "minus prob"} ) minus_p: float = field( default=0.2, metadata={"help": "minus prob"} ) total_up: int = field( default=300000, metadata={"help": "total updates"} ) block_size: int = field( default=5, metadata={"help": "block size"} ) logger = logging.getLogger(__name__) @register_task("translation_lev_modified", dataclass=TranslationLevenshteinConfig) class TranslationLevenshteinModifiedTask(TranslationTask): """ Translation (Sequence Generation) task for Levenshtein Transformer See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_. """ cfg: TranslationLevenshteinConfig def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.cfg.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] # infer langcode src, tgt = self.cfg.source_lang, self.cfg.target_lang self.datasets[split] = load_langpair_dataset( data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.cfg.dataset_impl, upsample_primary=self.cfg.upsample_primary, left_pad_source=self.cfg.left_pad_source, left_pad_target=self.cfg.left_pad_target, max_source_positions=self.cfg.max_source_positions, max_target_positions=self.cfg.max_target_positions, truncate_source=self.cfg.truncate_source, ) def inject_noise(self, target_tokens): def _random_delete(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() max_len = target_tokens.size(1) target_mask = target_tokens.eq(pad) target_score = target_tokens.clone().float().uniform_() target_score.masked_fill_( target_tokens.eq(bos) | target_tokens.eq(eos), 0.0 ) target_score.masked_fill_(target_mask, 1) target_score, target_rank = target_score.sort(1) target_length = target_mask.size(1) - target_mask.float().sum( 1, keepdim=True ) # do not delete <bos> and <eos> (we assign 0 score for them) target_cutoff = ( 2 + ( (target_length - 2) * target_score.new_zeros(target_score.size(0), 1).uniform_() ).long() ) target_cutoff = target_score.sort(1)[1] >= target_cutoff prev_target_tokens = ( target_tokens.gather(1, target_rank) .masked_fill_(target_cutoff, pad) .gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1]) ) prev_target_tokens = prev_target_tokens[ :, : prev_target_tokens.ne(pad).sum(1).max() ] return prev_target_tokens def _random_mask(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() unk = self.tgt_dict.unk() target_masks = ( target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos) ) 
target_score = target_tokens.clone().float().uniform_() target_score.masked_fill_(~target_masks, 2.0) target_length = target_masks.sum(1).float() target_length = target_length * target_length.clone().uniform_() target_length = target_length + 1 # make sure to mask at least one token. _, target_rank = target_score.sort(1) target_cutoff = new_arange(target_rank) < target_length[:, None].long() prev_target_tokens = target_tokens.masked_fill( target_cutoff.scatter(1, target_rank, target_cutoff), unk ) return prev_target_tokens def _full_mask(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() unk = self.tgt_dict.unk() target_mask = ( target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad) ) return target_tokens.masked_fill(~target_mask, unk) def _block_mask(target_tokens): block_size = self.cfg.block_size pad = self.tgt_dict.pad() unk = self.tgt_dict.unk() target_masks = target_tokens.ne(pad) target_length = target_masks.sum(1).float() cutoff_length = target_length * target_length.clone().uniform_() cutoff_length = cutoff_length.int() + 1 # make sure to mask at least one token. prev_target_tokens = torch.ones((target_tokens.size(0), target_tokens.size(1) + block_size)).to(target_tokens) padded_target_tokens = torch.ones((target_tokens.size(0), target_tokens.size(1) + block_size)).to(target_tokens) for i in range(target_tokens.size(0)): remain_length = target_length[i].int() - cutoff_length[i] prev_target_tokens[i][:remain_length] = target_tokens[i][:remain_length] prev_target_tokens[i][remain_length:block_size + remain_length] = unk padded_target_tokens[i][:target_tokens.size(1)] = target_tokens[i] prev_target_tokens = prev_target_tokens[ :, : prev_target_tokens.ne(pad).sum(1).max() ] padded_target_tokens = padded_target_tokens[ :, : prev_target_tokens.ne(pad).sum(1).max() ] return prev_target_tokens, padded_target_tokens if self.cfg.noise == "random_delete": return _random_delete(target_tokens) elif self.cfg.noise == "random_mask": return _random_mask(target_tokens) elif self.cfg.noise == "block_mask": return _block_mask(target_tokens) elif self.cfg.noise == "full_mask": return _full_mask(target_tokens) elif self.cfg.noise == "no_noise": return target_tokens else: raise NotImplementedError def build_generator(self, models, args, **unused): # add models input to match the API for SequenceGenerator from fairseq.iterative_refinement_generator import IterativeRefinementGenerator return IterativeRefinementGenerator( self.target_dictionary, eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0), max_iter=getattr(args, "iter_decode_max_iter", 10), beam_size=getattr(args, "iter_decode_with_beam", 1), reranking=getattr(args, "iter_decode_with_external_reranker", False), decoding_format=getattr(args, "decoding_format", None), adaptive=not getattr(args, "iter_decode_force_max_iter", False), retain_history=getattr(args, "retain_iter_history", False), ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): if constraints is not None: # Though see Susanto et al. 
(ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/ raise NotImplementedError( "Constrained decoding with the translation_lev task is not supported" ) return LanguagePairDataset( src_tokens, src_lengths, self.source_dictionary, append_bos=False ) def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() train_ratio = max(0, min(1, update_num / self.cfg.total_up)) sample["glat"] = {"context_p": self.cfg.start_p - self.cfg.minus_p * train_ratio} sample["prev_target"], sample["target"] = self.inject_noise(sample["target"]) with torch.autograd.profiler.record_function("forward"): loss, sample_size, logging_output = criterion(model, sample) if ignore_grad: loss *= 0 with torch.autograd.profiler.record_function("backward"): optimizer.backward(loss) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): sample["prev_target"], sample["target"] = self.inject_noise(sample["target"]) loss, sample_size, logging_output = criterion(model, sample) EVAL_BLEU_ORDER = 4 if self.cfg.eval_bleu: bleu = self._inference_with_bleu(self.sequence_generator, sample, model) logging_output["_bleu_sys_len"] = bleu.sys_len logging_output["_bleu_ref_len"] = bleu.ref_len # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync assert len(bleu.counts) == EVAL_BLEU_ORDER for i in range(EVAL_BLEU_ORDER): logging_output["_bleu_counts_" + str(i)] = bleu.counts[i] logging_output["_bleu_totals_" + str(i)] = bleu.totals[i] return loss, sample_size, logging_output def _inference_with_bleu(self, generator, sample, model): import sacrebleu def decode(toks, escape_unk=False): s = self.tgt_dict.string( toks.int().cpu(), self.cfg.eval_bleu_remove_bpe, # The default unknown string in fairseq is `<unk>`, but # this is tokenized by sacrebleu as `< unk >`, inflating # BLEU scores. Instead, we use a somewhat more verbose # alternative that is unlikely to appear in the real # reference, but doesn't get split into multiple tokens. unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"), ) if self.tokenizer: s = self.tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None) hyps, refs = [], [] for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]["tokens"])) refs.append( decode( utils.strip_pad(sample["target"][i], self.tgt_dict.pad()), escape_unk=True, # don't count <unk> as matches to the hypo ) ) if self.cfg.eval_bleu_print_samples: logger.info("example hypothesis: " + hyps[0]) logger.info("example reference: " + refs[0]) if self.cfg.eval_tokenized_bleu: return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none") else: return sacrebleu.corpus_bleu(hyps, [refs])
EXA-1-master
exa/models/unilm-master/decoding/GAD/block_plugins/tasks/translation_lev_modified.py
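The block_mask branch of inject_noise above is easiest to see on a toy example. The sketch below uses plain Python lists instead of the repo's padded tensors (names are ad hoc): it keeps a random-length prefix of the target and replaces the next block_size positions with <unk>, the training-time analogue of re-drafting one block behind a verified prefix.

import random

def block_mask(target, unk, block_size=5):
    cutoff = random.randint(1, len(target))  # how many trailing tokens to drop
    remain = len(target) - cutoff            # length of the kept prefix
    return target[:remain] + [unk] * block_size

# e.g. block_mask([11, 12, 13, 14, 15, 2], unk=3, block_size=5)
# might return [11, 12, 3, 3, 3, 3, 3]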
from .translation_lev_modified import *
EXA-1-master
exa/models/unilm-master/decoding/GAD/block_plugins/tasks/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATModel from fairseq.modules.transformer_sentence_encoder import init_bert_params import torch from fairseq.models.nat.nonautoregressive_transformer import NATransformerEncoder, NATransformerDecoder, NATransformerModel import logging import random from contextlib import contextmanager logger = logging.getLogger(__name__) @contextmanager def torch_seed(seed): state = torch.random.get_rng_state() state_cuda = torch.cuda.random.get_rng_state() torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) try: yield finally: torch.random.set_rng_state(state) torch.cuda.random.set_rng_state(state_cuda) @register_model("block") class BlockNAT(FairseqNATModel): forward_decoder = NATransformerModel.forward_decoder initialize_output_tokens = NATransformerModel.initialize_output_tokens def __init__(self, args, encoder, decoder): super().__init__(args, encoder, decoder) @staticmethod def add_args(parser): FairseqNATModel.add_args(parser) parser.add_argument( "--src-embedding-copy", action="store_true", help="copy encoder word embeddings as the initial input of the decoder", ) @classmethod def build_encoder(cls, args, tgt_dict, embed_tokens): encoder = NATransformerEncoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): encoder.apply(init_bert_params) return encoder @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): decoder = NATransformerDecoder(args, tgt_dict, embed_tokens) if getattr(args, "apply_bert_init", False): decoder.apply(init_bert_params) return decoder def forward( self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, glat=None, **kwargs ): # encoding encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) nonpad_positions = tgt_tokens.ne(self.pad) mask_positions = prev_output_tokens.eq(self.unk) & nonpad_positions mask_lens = (mask_positions).sum(1) l2r_positions = prev_output_tokens.ne(self.unk) & prev_output_tokens.ne(self.pad) l2r_lens = (l2r_positions).sum(1) rand_seed = random.randint(0, 19260817) glat_info = None if glat and tgt_tokens is not None: with torch.no_grad(): with torch_seed(rand_seed): word_ins_out = self.decoder( normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) pred_tokens = word_ins_out.argmax(-1) same_num = ((pred_tokens == tgt_tokens) & mask_positions).sum(1) input_mask = torch.ones_like(nonpad_positions) bsz, seq_len = tgt_tokens.size() for li in range(bsz): target_num = (((mask_lens[li] - same_num[li].sum()).float()) * glat['context_p']).long() if target_num > 0: input_mask[li].scatter_(dim=0, index=(torch.randperm(mask_lens[li])[:target_num].cuda() + l2r_lens[li]).cuda(), value=0) input_mask = input_mask.eq(1) tgt_mask = input_mask.masked_fill(~mask_positions, False) glat_prev_output_tokens = prev_output_tokens.masked_fill(~input_mask, 0) + tgt_tokens.masked_fill( input_mask, 0) glat_tgt_tokens = tgt_tokens.masked_fill(~tgt_mask, self.pad) prev_output_tokens, tgt_tokens = glat_prev_output_tokens, glat_tgt_tokens glat_info = { "glat_accu": (same_num.sum() / mask_lens.sum()).item(), "glat_context_p": glat['context_p'], } with torch_seed(rand_seed): word_ins_out = self.decoder( 
normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, ) ret = { "word_ins": { "out": word_ins_out, "tgt": tgt_tokens, "mask": tgt_tokens.ne(self.pad), "ls": self.args.label_smoothing, "nll_loss": True, } } if glat_info is not None: ret.update(glat_info) return ret @register_model_architecture( "block", "block_6e6d512" ) def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.src_embedding_copy = getattr(args, "src_embedding_copy", False) @register_model_architecture( "block", "block" ) def block_architecture(args): args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", args.encoder_embed_dim*4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", args.encoder_embed_dim//64) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim*4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", args.decoder_embed_dim//64) base_architecture(args) @register_model_architecture( "block", "block_base" ) def base_architecture2(args): base_architecture(args)
EXA-1-master
exa/models/unilm-master/decoding/GAD/block_plugins/models/BlockNAT.py
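A back-of-the-envelope illustration of the glancing schedule that drives BlockNAT training (the numbers below are made up; the defaults come from TranslationLevenshteinConfig): context_p is annealed from start_p towards start_p - minus_p over total_up updates, and the number of gold tokens revealed to the decoder grows with how many masked positions the first decoding pass predicted incorrectly.

start_p, minus_p, total_up = 0.5, 0.2, 300000    # defaults from TranslationLevenshteinConfig
update_num = 150000                              # assumed current update
context_p = start_p - minus_p * min(1, update_num / total_up)   # -> 0.4

mask_len, correct = 5, 2                          # masked positions / already-correct drafts
reveal = int((mask_len - correct) * context_p)    # -> 1 gold token un-masked for the 2nd pass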
from .BlockNAT import *
EXA-1-master
exa/models/unilm-master/decoding/GAD/block_plugins/models/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from math import log import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from torch import Tensor import numpy as np @register_criterion("glat_loss") class LabelSmoothedDualImitationCriterion(FairseqCriterion): def __init__(self, task, label_smoothing): super().__init__(task) self.label_smoothing = label_smoothing @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument( "--label-smoothing", default=0.0, type=float, metavar="D", help="epsilon for label smoothing, 0 means no label smoothing", ) parser.add_argument('--mse-lambda', default=10, type=float, metavar='D') def _compute_loss( self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0 ): """ outputs: batch x len x d_model targets: batch x len masks: batch x len policy_logprob: if there is some policy depends on the likelihood score as rewards. """ def mean_ds(x: Tensor, dim=None) -> Tensor: return ( x.float().mean().type_as(x) if dim is None else x.float().mean(dim).type_as(x) ) if masks is not None: outputs, targets = outputs[masks], targets[masks] if masks is not None and not masks.any(): nll_loss = torch.tensor(0) loss = nll_loss else: logits = F.log_softmax(outputs, dim=-1) if targets.dim() == 1: losses = F.nll_loss(logits, targets.to(logits.device), reduction="none") else: # soft-labels losses = F.kl_div(logits, targets.to(logits.device), reduction="none") losses = losses.sum(-1) nll_loss = mean_ds(losses) if label_smoothing > 0: loss = ( nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing ) else: loss = nll_loss loss = loss * factor return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor} def _custom_loss(self, loss, name="loss", factor=1.0): return {"name": name, "loss": loss, "factor": factor} def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ nsentences, ntokens = sample["nsentences"], sample["ntokens"] # B x T src_tokens, src_lengths = ( sample["net_input"]["src_tokens"], sample["net_input"]["src_lengths"], ) tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"] if 'glat' in sample: glat = sample['glat'] else: glat = None outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens, glat) losses, nll_loss = [], [] for obj in outputs: if obj.startswith('glat'): continue if outputs[obj].get("loss", None) is None: _losses = self._compute_loss( outputs[obj].get("out"), outputs[obj].get("tgt"), outputs[obj].get("mask", None), outputs[obj].get("ls", 0.0), name=obj + "-loss", factor=outputs[obj].get("factor", 1.0), ) else: _losses = self._custom_loss( outputs[obj].get("loss"), name=obj + "-loss", factor=outputs[obj].get("factor", 1.0), ) losses += [_losses] if outputs[obj].get("nll_loss", False): nll_loss += [_losses.get("nll_loss", 0.0)] loss = sum(l["loss"] for l in losses) nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0) # NOTE: # we don't need to use sample_size as denominator for the gradient # here sample_size is just used for logging sample_size = 1 logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } if "glat_accu" in outputs: logging_output["glat_accu"] = outputs['glat_accu'] if "glat_context_p" in outputs: logging_output['glat_context_p'] = outputs['glat_context_p'] for l in losses: logging_output[l["name"]] = ( utils.item(l["loss"].data / l["factor"]) if reduce else l[["loss"]].data / l["factor"] ) return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs)) metrics.log_scalar( "loss", loss / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) log_metric("glat_accu", logging_outputs) log_metric("glat_context_p", logging_outputs) for key in logging_outputs[0]: if key[-5:] == "-loss": val = sum(log.get(key, 0) for log in logging_outputs) metrics.log_scalar( key[:-5], val / sample_size / math.log(2) if sample_size > 0 else 0.0, sample_size, round=3, ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return False def log_metric(key, logging_outputs): if len(logging_outputs) > 0 and key in logging_outputs[0]: metrics.log_scalar( key, utils.item(np.mean([log.get(key, 0) for log in logging_outputs])), priority=10, round=3 )
EXA-1-master
exa/models/unilm-master/decoding/GAD/block_plugins/criterions/glat_loss.py
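The label-smoothing combination inside _compute_loss above can be checked with a tiny numeric example (the logits below are made up): the smoothed loss mixes the gold-token NLL with the mean log-probability over the vocabulary, weighted by the --label-smoothing epsilon.

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, -1.0]])   # one masked position, vocab of 3
target = torch.tensor([0])
eps = 0.1                                   # --label-smoothing

logp = F.log_softmax(logits, dim=-1)
nll = F.nll_loss(logp, target, reduction="none").mean()
smoothed = nll * (1 - eps) - logp.mean() * eps   # same mixing as _compute_loss
print(nll.item(), smoothed.item())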
from .glat_loss import *
EXA-1-master
exa/models/unilm-master/decoding/GAD/block_plugins/criterions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from typing import Callable, List, Optional import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, EvalLMConfig, GenerationConfig, InteractiveConfig, OptimizationConfig, ) from fairseq.dataclass.utils import gen_parser_from_dataclass # this import is for backward compatibility from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa def get_preprocessing_parser(default_task="translation"): parser = get_parser("Preprocessing", default_task) add_preprocess_args(parser) return parser def get_training_parser(default_task="translation"): parser = get_parser("Trainer", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) return parser def get_generation_parser(interactive=False, default_task="translation"): parser = get_parser("Generation", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_generation_args(parser) add_checkpoint_args(parser) if interactive: add_interactive_args(parser) return parser def get_interactive_generation_parser(default_task="translation"): return get_generation_parser(interactive=True, default_task=default_task) def get_eval_lm_parser(default_task="language_modeling"): parser = get_parser("Evaluate Language Model", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_eval_lm_args(parser) return parser def get_validation_parser(default_task=None): parser = get_parser("Validation", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser, default_world_size=1) group = parser.add_argument_group("Evaluation") gen_parser_from_dataclass(group, CommonEvalConfig()) return parser def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, ): """ Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values """ if suppress_defaults: # Parse args without any default values. This requires us to parse # twice, once to identify all the necessary task/model args, and a second # time with all defaults set to None. 
args = parse_args_and_arch( parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False, ) suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()}) args = suppressed_parser.parse_args(input_args) return argparse.Namespace( **{k: v for k, v in vars(args).items() if v is not None} ) from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args(input_args) utils.import_user_module(usr_args) if modify_parser is not None: modify_parser(parser) # The parser doesn't know about model/criterion/optimizer-specific args, so # we parse twice. First we parse the model/criterion/optimizer, then we # parse a second time after adding the *-specific arguments. # If input_args is given, we will parse those args instead of sys.argv. args, _ = parser.parse_known_args(input_args) # Add model-specific args to parser. if hasattr(args, "arch"): model_specific_group = parser.add_argument_group( "Model-specific configuration", # Only include attributes which are explicitly given as command-line # arguments or which have default values. argument_default=argparse.SUPPRESS, ) if args.arch in ARCH_MODEL_REGISTRY: ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) elif args.arch in MODEL_REGISTRY: MODEL_REGISTRY[args.arch].add_args(model_specific_group) else: raise RuntimeError() if hasattr(args, "task"): from fairseq.tasks import TASK_REGISTRY TASK_REGISTRY[args.task].add_args(parser) if getattr(args, "use_bmuf", False): # hack to support extra args for block distributed data parallelism from fairseq.optim.bmuf import FairseqBMUF FairseqBMUF.add_args(parser) # Add *-specific args to parser. from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): choice = getattr(args, registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] if hasattr(cls, "add_args"): cls.add_args(parser) elif hasattr(cls, "__dataclass"): gen_parser_from_dataclass(parser, cls.__dataclass()) # Modify the parser a second time, since defaults may have been reset if modify_parser is not None: modify_parser(parser) # Parse a second time. if parse_known: args, extra = parser.parse_known_args(input_args) else: args = parser.parse_args(input_args) extra = None # Post-process args. if ( hasattr(args, "batch_size_valid") and args.batch_size_valid is None ) or not hasattr(args, "batch_size_valid"): args.batch_size_valid = args.batch_size if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None: args.max_tokens_valid = args.max_tokens if getattr(args, "memory_efficient_fp16", False): args.fp16 = True if getattr(args, "memory_efficient_bf16", False): args.bf16 = True args.tpu = getattr(args, "tpu", False) args.bf16 = getattr(args, "bf16", False) if args.bf16: args.tpu = True if args.tpu and args.fp16: raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs") if getattr(args, "seed", None) is None: args.seed = 1 # default seed for training args.no_seed_provided = True else: args.no_seed_provided = False # Apply architecture configuration. 
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY: ARCH_CONFIG_REGISTRY[args.arch](args) if parse_known: return args, extra else: return args def get_parser(desc, default_task="translation"): # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args() utils.import_user_module(usr_args) parser = argparse.ArgumentParser(allow_abbrev=False) gen_parser_from_dataclass(parser, CommonConfig()) from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): parser.add_argument( "--" + registry_name.replace("_", "-"), default=REGISTRY["default"], choices=REGISTRY["registry"].keys(), ) # Task definitions can be found under fairseq/tasks/ from fairseq.tasks import TASK_REGISTRY parser.add_argument( "--task", metavar="TASK", default=default_task, choices=TASK_REGISTRY.keys(), help="task", ) # fmt: on return parser def add_preprocess_args(parser): group = parser.add_argument_group("Preprocessing") # fmt: off group.add_argument("-s", "--source-lang", default=None, metavar="SRC", help="source language") group.add_argument("-t", "--target-lang", default=None, metavar="TARGET", help="target language") group.add_argument("--trainpref", metavar="FP", default=None, help="train file prefix (also used to build dictionaries)") group.add_argument("--validpref", metavar="FP", default=None, help="comma separated, valid file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--testpref", metavar="FP", default=None, help="comma separated, test file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--align-suffix", metavar="FP", default=None, help="alignment file suffix") group.add_argument("--destdir", metavar="DIR", default="data-bin", help="destination dir") group.add_argument("--thresholdtgt", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--thresholdsrc", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--tgtdict", metavar="FP", help="reuse given target dictionary") group.add_argument("--srcdict", metavar="FP", help="reuse given source dictionary") group.add_argument("--nwordstgt", metavar="N", default=-1, type=int, help="number of target words to retain") group.add_argument("--nwordssrc", metavar="N", default=-1, type=int, help="number of source words to retain") group.add_argument("--alignfile", metavar="ALIGN", default=None, help="an alignment file (optional)") parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument("--joined-dictionary", action="store_true", help="Generate joined dictionary") group.add_argument("--only-source", action="store_true", help="Only process the source language") group.add_argument("--padding-factor", metavar="N", default=8, type=int, help="Pad dictionary size to be multiple of N") group.add_argument("--workers", metavar="N", default=1, type=int, help="number of parallel workers") # fmt: on return parser def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group("dataset_data_loading") gen_parser_from_dataclass(group, DatasetConfig()) # 
fmt: on
    return group


def add_distributed_training_args(parser, default_world_size=None):
    group = parser.add_argument_group("distributed_training")
    if default_world_size is None:
        default_world_size = max(1, torch.cuda.device_count())
    gen_parser_from_dataclass(
        group, DistributedTrainingConfig(distributed_world_size=default_world_size)
    )
    return group


def add_optimization_args(parser):
    group = parser.add_argument_group("optimization")
    # fmt: off
    gen_parser_from_dataclass(group, OptimizationConfig())
    # fmt: on
    return group


def add_checkpoint_args(parser):
    group = parser.add_argument_group("checkpoint")
    # fmt: off
    gen_parser_from_dataclass(group, CheckpointConfig())
    # fmt: on
    return group


def add_common_eval_args(group):
    gen_parser_from_dataclass(group, CommonEvalConfig())


def add_eval_lm_args(parser):
    group = parser.add_argument_group("LM Evaluation")
    add_common_eval_args(group)
    gen_parser_from_dataclass(group, EvalLMConfig())


def add_generation_args(parser):
    group = parser.add_argument_group("Generation")
    add_common_eval_args(group)
    gen_parser_from_dataclass(group, GenerationConfig())
    return group


def add_interactive_args(parser):
    group = parser.add_argument_group("Interactive")
    gen_parser_from_dataclass(group, InteractiveConfig())


def add_model_args(parser):
    group = parser.add_argument_group("Model configuration")
    # fmt: off
    # Model definitions can be found under fairseq/models/
    #
    # The model architecture can be specified in several ways.
    # In increasing order of priority:
    # 1) model defaults (lowest priority)
    # 2) --arch argument
    # 3) --encoder/decoder-* arguments (highest priority)
    from fairseq.models import ARCH_MODEL_REGISTRY
    group.add_argument('--arch', '-a', metavar='ARCH',
                       choices=ARCH_MODEL_REGISTRY.keys(),
                       help='model architecture')
    # fmt: on
    return group
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/options.py
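The two-pass parsing in options.py above (parse_known_args to discover --arch/--task, then add the registry-specific flags and reparse) can be illustrated with a small standalone argparse sketch; the architecture names and flags below are made up for illustration and are not fairseq's own registries.

# Minimal sketch of the two-pass argparse pattern (hypothetical architectures).
import argparse

ARCH_ARGS = {
    "small": lambda g: g.add_argument("--encoder-layers", type=int, default=6),
    "big": lambda g: g.add_argument("--encoder-layers", type=int, default=24),
}

parser = argparse.ArgumentParser()
parser.add_argument("--arch", choices=ARCH_ARGS.keys(), default="small")

# First pass: only learn which architecture was requested; unknown flags are ignored.
args, _ = parser.parse_known_args(["--arch", "big", "--encoder-layers", "12"])

# Second pass: add the architecture-specific flags, then parse everything.
group = parser.add_argument_group("Model-specific configuration",
                                  argument_default=argparse.SUPPRESS)
ARCH_ARGS[args.arch](group)
args = parser.parse_args(["--arch", "big", "--encoder-layers", "12"])
print(args)  # Namespace(arch='big', encoder_layers=12)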
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import namedtuple import numpy as np import torch from fairseq import utils DecoderOut = namedtuple( "IterativeRefinementDecoderOut", ["output_tokens", "output_scores", "attn", "step", "max_step", "history"], ) class IterativeRefinementGenerator(object): def __init__( self, tgt_dict, models=None, eos_penalty=0.0, max_iter=10, max_ratio=2, beam_size=1, decoding_format=None, retain_dropout=False, adaptive=True, retain_history=False, reranking=False, ): """ Generates translations based on iterative refinement. Args: tgt_dict: target dictionary eos_penalty: if > 0.0, it penalized early-stopping in decoding max_iter: maximum number of refinement iterations max_ratio: generate sequences of maximum length ax, where x is the source length decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'} retain_dropout: retaining dropout in the inference adaptive: decoding with early stop """ self.bos = tgt_dict.bos() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.eos_penalty = eos_penalty self.max_iter = max_iter self.max_ratio = max_ratio self.beam_size = beam_size self.reranking = reranking self.decoding_format = decoding_format self.retain_dropout = retain_dropout self.retain_history = retain_history self.adaptive = adaptive self.models = models def generate_batched_itr( self, data_itr, maxlen_a=None, maxlen_b=None, cuda=False, timer=None, prefix_size=0, ): """Iterate over a batched dataset and yield individual translations. Args: maxlen_a/b: generate sequences of maximum length ax + b, where x is the source sentence length. cuda: use GPU for generation timer: StopwatchMeter for timing generations. """ for sample in data_itr: if "net_input" not in sample: continue if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate( self.models, sample, prefix_tokens=sample["target"][:, :prefix_size] if prefix_size > 0 else None, ) if timer is not None: timer.stop(sample["ntokens"]) for i, id in enumerate(sample["id"]): # remove padding src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad) ref = utils.strip_pad(sample["target"][i, :], self.pad) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample, prefix_tokens=None, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the IterativeRefinementGenerator is not supported" ) # TODO: iterative refinement generator does not support ensemble for now. if not self.retain_dropout: for model in models: model.eval() model, reranker = models[0], None if self.reranking: assert len(models) > 1, "Assuming the last checkpoint is the reranker" assert ( self.beam_size > 1 ), "Reranking requires multiple translation for each example" reranker = models[-1] models = models[:-1] if len(models) > 1 and hasattr(model, "enable_ensemble"): assert model.allow_ensemble, "{} does not support ensembling".format( model.__class__.__name__ ) model.enable_ensemble(models) # TODO: better encoder inputs? 
src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size() # initialize encoder_out = model.forward_encoder([src_tokens, src_lengths]) prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens) if self.beam_size > 1: assert ( model.allow_length_beam ), "{} does not support decoding with length beam.".format( model.__class__.__name__ ) # regenerate data based on length-beam length_beam_order = ( utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1) ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, length_beam_order ) prev_decoder_out = model.regenerate_length_beam( prev_decoder_out, self.beam_size ) bsz = bsz * self.beam_size sent_idxs = torch.arange(bsz) prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.retain_history: prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens]) finalized = [[] for _ in range(bsz)] def is_a_loop(x, y, s, a): b, l_x, l_y = x.size(0), x.size(1), y.size(1) if l_x > l_y: y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1) s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1) if a is not None: a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1) elif l_x < l_y: x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1) return (x == y).all(1), y, s, a def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn): cutoff = prev_out_token.ne(self.pad) tokens = prev_out_token[cutoff] if prev_out_score is None: scores, score = None, None else: scores = prev_out_score[cutoff] score = scores.mean() if prev_out_attn is None: hypo_attn, alignment = None, None else: hypo_attn = prev_out_attn[cutoff] alignment = hypo_attn.max(dim=1)[1] return { "steps": step, "tokens": tokens, "positional_scores": scores, "score": score, "hypo_attn": hypo_attn, "alignment": alignment, } for step in range(self.max_iter + 1): decoder_options = { "eos_penalty": self.eos_penalty, "max_ratio": self.max_ratio, "decoding_format": self.decoding_format, } prev_decoder_out = prev_decoder_out._replace( step=step, max_step=self.max_iter + 1, ) decoder_out = model.forward_decoder( prev_decoder_out, encoder_out, **decoder_options ) if self.adaptive: # terminate if there is a loop terminated, out_tokens, out_scores, out_attn = is_a_loop( prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn, ) decoder_out = decoder_out._replace( output_tokens=out_tokens, output_scores=out_scores, attn=out_attn, ) else: terminated = decoder_out.output_tokens.new_zeros( decoder_out.output_tokens.size(0) ).bool() if step == self.max_iter: # reach last iteration, terminate terminated.fill_(1) # collect finalized sentences finalized_idxs = sent_idxs[terminated] finalized_tokens = decoder_out.output_tokens[terminated] finalized_scores = decoder_out.output_scores[terminated] finalized_attn = ( None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated] ) if self.retain_history: finalized_history_tokens = [h[terminated] for h in decoder_out.history] for i in range(finalized_idxs.size(0)): finalized[finalized_idxs[i]] = [ finalized_hypos( step, finalized_tokens[i], finalized_scores[i], None if finalized_attn is None else finalized_attn[i], ) ] if self.retain_history: finalized[finalized_idxs[i]][0]["history"] = [] for j in range(len(finalized_history_tokens)): finalized[finalized_idxs[i]][0]["history"].append( finalized_hypos( step, finalized_history_tokens[j][i], None, None ) ) 
# check if all terminated if terminated.sum() == terminated.size(0): break # for next step not_terminated = ~terminated prev_decoder_out = decoder_out._replace( output_tokens=decoder_out.output_tokens[not_terminated], output_scores=decoder_out.output_scores[not_terminated], attn=decoder_out.attn[not_terminated] if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0) else None, history=[h[not_terminated] for h in decoder_out.history] if decoder_out.history is not None else None, ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, not_terminated.nonzero(as_tuple=False).squeeze() ) sent_idxs = sent_idxs[not_terminated] prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.beam_size > 1: if reranker is not None: finalized = self.rerank( reranker, finalized, [src_tokens, src_lengths], self.beam_size ) # aggregate information from length beam finalized = [ finalized[ np.argmax( [ finalized[self.beam_size * i + j][0]["score"] for j in range(self.beam_size) ] ) + self.beam_size * i ] for i in range(len(finalized) // self.beam_size) ] return finalized def rerank(self, reranker, finalized, encoder_input, beam_size): def rebuild_batch(finalized): finalized_tokens = [f[0]["tokens"] for f in finalized] finalized_maxlen = max(f.size(0) for f in finalized_tokens) final_output_tokens = ( finalized_tokens[0] .new_zeros(len(finalized_tokens), finalized_maxlen) .fill_(self.pad) ) for i, f in enumerate(finalized_tokens): final_output_tokens[i, : f.size(0)] = f return final_output_tokens final_output_tokens = rebuild_batch(finalized) final_output_tokens[ :, 0 ] = self.eos # autoregressive model assumes starting with EOS reranker_encoder_out = reranker.encoder(*encoder_input) length_beam_order = ( utils.new_arange( final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1) ) .t() .reshape(-1) ) reranker_encoder_out = reranker.encoder.reorder_encoder_out( reranker_encoder_out, length_beam_order ) reranking_scores = reranker.get_normalized_probs( reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None, ) reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None]) reranking_masks = final_output_tokens[:, 1:].ne(self.pad) reranking_scores = ( reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1) ) reranking_scores = reranking_scores / reranking_masks.sum(1).type_as( reranking_scores ) for i in range(len(finalized)): finalized[i][0]["score"] = reranking_scores[i] return finalized
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/iterative_refinement_generator.py
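The adaptive early exit in the generator above hinges on is_a_loop: pad the shorter of the previous and current token matrices, then terminate any row that no longer changes between refinement iterations. A minimal standalone sketch of that check on toy tensors (pad index assumed to be 1):

# Standalone sketch of the row-wise "has refinement stopped changing?" check.
import torch

pad = 1
prev = torch.tensor([[5, 7, 9, 1], [4, 4, 6, 8]])   # previous iteration's tokens
curr = torch.tensor([[5, 7, 9], [4, 5, 6]])          # current iteration's tokens

# Right-pad the shorter tensor so both have the same width, then compare rows.
if curr.size(1) < prev.size(1):
    curr = torch.cat(
        [curr, curr.new_full((curr.size(0), prev.size(1) - curr.size(1)), pad)], dim=1
    )
elif prev.size(1) < curr.size(1):
    prev = torch.cat(
        [prev, prev.new_full((prev.size(0), curr.size(1) - prev.size(1)), pad)], dim=1
    )

terminated = (prev == curr).all(dim=1)
print(terminated)  # tensor([ True, False]) -> sentence 0 can stop refining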
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch logger = logging.getLogger(__name__) class NanDetector: """ Detects the first NaN or Inf in forward and/or backward pass and logs, together with the module name """ def __init__(self, model, forward=True, backward=True): self.bhooks = [] self.fhooks = [] self.forward = forward self.backward = backward self.named_parameters = list(model.named_parameters()) self.reset() for name, mod in model.named_modules(): mod.__module_name = name self.add_hooks(mod) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): # Dump out all model gnorms to enable better debugging norm = {} gradients = {} for name, param in self.named_parameters: if param.grad is not None: grad_norm = torch.norm(param.grad.data, p=2, dtype=torch.float32) norm[name] = grad_norm.item() if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any(): gradients[name] = param.grad.data if len(gradients) > 0: logger.info("Detected nan/inf grad norm, dumping norms...") logger.info(f"norms: {norm}") logger.info(f"gradients: {gradients}") self.close() def add_hooks(self, module): if self.forward: self.fhooks.append(module.register_forward_hook(self.fhook_fn)) if self.backward: self.bhooks.append(module.register_backward_hook(self.bhook_fn)) def reset(self): self.has_printed_f = False self.has_printed_b = False def _detect(self, tensor, name, backward): err = None if ( torch.is_floating_point(tensor) # single value tensors (like the loss) will not provide much info and tensor.numel() >= 2 ): with torch.no_grad(): if torch.isnan(tensor).any(): err = "NaN" elif torch.isinf(tensor).any(): err = "Inf" if err is not None: err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}" return err def _apply(self, module, inp, x, backward): if torch.is_tensor(x): if isinstance(inp, tuple) and len(inp) > 0: inp = inp[0] err = self._detect(x, module.__module_name, backward) if err is not None: if torch.is_tensor(inp) and not backward: err += ( f" input max: {inp.max().item()}, input min: {inp.min().item()}" ) has_printed_attr = "has_printed_b" if backward else "has_printed_f" logger.warning(err) setattr(self, has_printed_attr, True) elif isinstance(x, dict): for v in x.values(): self._apply(module, inp, v, backward) elif isinstance(x, list) or isinstance(x, tuple): for v in x: self._apply(module, inp, v, backward) def fhook_fn(self, module, inp, output): if not self.has_printed_f: self._apply(module, inp, output, backward=False) def bhook_fn(self, module, inp, output): if not self.has_printed_b: self._apply(module, inp, output, backward=True) def close(self): for hook in self.fhooks + self.bhooks: hook.remove()
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/nan_detector.py
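NanDetector works by registering forward/backward hooks on every submodule and scanning their outputs for NaN/Inf values. A minimal sketch of the same idea on a toy module (not the fairseq class itself):

# Minimal NaN detection via a forward hook on a toy layer.
import torch
import torch.nn as nn

def nan_hook(module, inputs, output):
    if torch.is_tensor(output) and torch.isnan(output).any():
        print(f"NaN detected in output of {module.__class__.__name__}")

layer = nn.Linear(4, 4)
handle = layer.register_forward_hook(nan_hook)

x = torch.randn(2, 4)
x[0, 0] = float("nan")   # poison the input so the output contains NaN
_ = layer(x)             # prints the warning
handle.remove()          # always remove hooks when done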
__version__ = "1.0.0a0"
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/version.py
# Originally from Microsoft Corporation. # Licensed under the MIT License. """ Wrapper for ngram_repeat_block cuda extension """ import torch from torch import nn import math from typing import Dict, List, Optional import warnings try: from fairseq import ngram_repeat_block_cuda EXTENSION_BUILT = True except ImportError: EXTENSION_BUILT = False def is_cuda_extension_usable() -> bool: """Check whether ngram_repeat_block_cuda is built properly""" if not EXTENSION_BUILT or not torch.cuda.is_available(): return False bsz = 2 tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda") lprobs = torch.rand((8, 12), device="cuda") try: outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3) outputs = outputs + 4 # This line breaks if the extension is built incorrectly. return True except RuntimeError: warnings.warn( "NGramRepeatBlock extension must be rebuilt." 'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace' ) return False class NGramRepeatBlock(nn.Module): """ Wrapper class for calling ngram_repeat_block cuda extension """ def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True): super().__init__() self.use_extension = is_cuda_extension_usable() if use_extension else False self.no_repeat_ngram_size = no_repeat_ngram_size def reset_parameters(self): pass @torch.jit.unused def call_cuda_extension( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): return ngram_repeat_block_cuda.forward( tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size ) def forward( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): """ Args: tokens(Tensor): Input tokens(Bsz*beam, seq_len) lprobs(Tensor): likelihood probability, Expected to be updated in place.(Bsz*beam, vocab_size) bsz(int): batch size step(int): current step beam_size(int): beam size no_repeat_ngram_size(int): Ngram size """ msg = f"expected {bsz *beam_size} got" assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}" assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}" if self.use_extension: return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step) else: return self._no_repeat_ngram( tokens, lprobs, bsz, beam_size, step, ) def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int): """For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf""" gen_ngrams: List[Dict[str, List[int]]] = [ torch.jit.annotate(Dict[str, List[int]], {}) for bbsz_idx in range(bsz * beam_size) ] cpu_tokens = tokens.cpu() for bbsz_idx in range(bsz * beam_size): gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist() for ngram in self.transpose_list( [gen_tokens[i:] for i in range(self.no_repeat_ngram_size)] ): key = ",".join([str(x) for x in ngram[:-1]]) gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get( key, torch.jit.annotate(List[int], []) ) + [ngram[-1]] if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [ self.calculate_banned_tokens( tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx ) for bbsz_idx in range(bsz * beam_size) ] else: banned_tokens = [ torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size) ] for bbsz_idx in range(bsz * beam_size): lprobs[bbsz_idx][ torch.tensor(banned_tokens[bbsz_idx]).long() ] = torch.tensor(-math.inf).to(lprobs) return lprobs @staticmethod def calculate_banned_tokens( tokens, step: int, gen_ngrams: List[Dict[str, 
List[int]]],
        no_repeat_ngram_size: int,
        bbsz_idx: int,
    ):
        tokens_list: List[int] = tokens[
            bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1
        ].tolist()
        # before decoding the next token, prevent decoding of ngrams that have already appeared
        ngram_index = ",".join([str(x) for x in tokens_list])
        return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], []))

    @staticmethod
    def transpose_list(l: List[List[int]]):
        # GeneratorExp aren't supported in TS so ignoring the lint
        min_len = min([len(x) for x in l])  # noqa
        l2 = [[row[i] for row in l] for i in range(min_len)]
        return l2
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/ngram_repeat_block.py
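The CPU fallback in _no_repeat_ngram above builds a map from each (n-1)-gram to the tokens that followed it and bans those continuations for the current context. A self-contained toy sketch of that logic (vocabulary size and token ids are made up):

# Toy sketch of n-gram repeat blocking for a single hypothesis.
import math
import torch

no_repeat_ngram_size = 2
tokens = [3, 5, 3, 5, 3]          # one hypothesis generated so far
lprobs = torch.zeros(8)           # toy log-probs over a vocabulary of 8 tokens

# Collect every observed (n-1)-gram -> next-token mapping.
gen_ngrams = {}
for i in range(len(tokens) - no_repeat_ngram_size + 1):
    ngram = tokens[i : i + no_repeat_ngram_size]
    key = ",".join(map(str, ngram[:-1]))
    gen_ngrams.setdefault(key, []).append(ngram[-1])

# The current (n-1)-gram context decides which continuations are banned.
context = ",".join(map(str, tokens[len(tokens) - no_repeat_ngram_size + 1 :]))
banned = gen_ngrams.get(context, [])
if banned:
    lprobs[torch.tensor(banned).long()] = -math.inf
print(banned)  # [5, 5]: token 5 is banned because the bigram "3 5" already occurred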
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace from typing import Union from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import populate_dataclass, merge_with_parent from hydra.core.config_store import ConfigStore from omegaconf import DictConfig REGISTRIES = {} def setup_registry(registry_name: str, base_class=None, default=None, required=False): assert registry_name.startswith("--") registry_name = registry_name[2:].replace("-", "_") REGISTRY = {} REGISTRY_CLASS_NAMES = set() DATACLASS_REGISTRY = {} # maintain a registry of all registries if registry_name in REGISTRIES: return # registry already exists REGISTRIES[registry_name] = { "registry": REGISTRY, "default": default, "dataclass_registry": DATACLASS_REGISTRY, } def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs): if isinstance(cfg, DictConfig): choice = cfg._name if choice and choice in DATACLASS_REGISTRY: dc = DATACLASS_REGISTRY[choice] cfg = merge_with_parent(dc(), cfg) elif isinstance(cfg, str): choice = cfg if choice in DATACLASS_REGISTRY: cfg = DATACLASS_REGISTRY[choice]() else: choice = getattr(cfg, registry_name, None) if choice in DATACLASS_REGISTRY: cfg = populate_dataclass(DATACLASS_REGISTRY[choice](), cfg) if choice is None: if required: raise ValueError("{} is required!".format(registry_name)) return None cls = REGISTRY[choice] if hasattr(cls, "build_" + registry_name): builder = getattr(cls, "build_" + registry_name) else: builder = cls return builder(cfg, *extra_args, **extra_kwargs) def register_x(name, dataclass=None): def register_x_cls(cls): if name in REGISTRY: raise ValueError( "Cannot register duplicate {} ({})".format(registry_name, name) ) if cls.__name__ in REGISTRY_CLASS_NAMES: raise ValueError( "Cannot register {} with duplicate class name ({})".format( registry_name, cls.__name__ ) ) if base_class is not None and not issubclass(cls, base_class): raise ValueError( "{} must extend {}".format(cls.__name__, base_class.__name__) ) if dataclass is not None and not issubclass(dataclass, FairseqDataclass): raise ValueError( "Dataclass {} must extend FairseqDataclass".format(dataclass) ) cls.__dataclass = dataclass if cls.__dataclass is not None: DATACLASS_REGISTRY[name] = cls.__dataclass cs = ConfigStore.instance() node = dataclass() node._name = name cs.store(name=name, group=registry_name, node=node, provider="fairseq") REGISTRY[name] = cls return cls return register_x_cls return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/registry.py
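setup_registry above returns a build_x/register_x pair that lets components self-register under a name and be constructed later from a config choice. A stripped-down sketch of that decorator pattern (toy names, no dataclass or Hydra handling):

# Minimal register/build registry in the spirit of setup_registry.
REGISTRY = {}

def register(name):
    def wrapper(cls):
        if name in REGISTRY:
            raise ValueError(f"Cannot register duplicate entry ({name})")
        REGISTRY[name] = cls
        return cls
    return wrapper

def build(name, *args, **kwargs):
    return REGISTRY[name](*args, **kwargs)

@register("adam_like")
class AdamLike:
    def __init__(self, lr=1e-3):
        self.lr = lr

opt = build("adam_like", lr=3e-4)
print(type(opt).__name__, opt.lr)  # AdamLike 0.0003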
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""

import os
import sys

try:
    from .version import __version__  # noqa
except ImportError:
    version_txt = os.path.join(os.path.dirname(__file__), "version.txt")
    with open(version_txt) as f:
        __version__ = f.read().strip()

__all__ = ["pdb"]

# backwards compatibility to support `from fairseq.X import Y`
from fairseq.distributed import utils as distributed_utils
from fairseq.logging import meters, metrics, progress_bar  # noqa

sys.modules["fairseq.distributed_utils"] = distributed_utils
sys.modules["fairseq.meters"] = meters
sys.modules["fairseq.metrics"] = metrics
sys.modules["fairseq.progress_bar"] = progress_bar

# initialize hydra
from fairseq.dataclass.initialize import hydra_init

hydra_init()

import fairseq.criterions  # noqa
import fairseq.distributed  # noqa
import fairseq.models  # noqa
import fairseq.modules  # noqa
import fairseq.optim  # noqa
import fairseq.optim.lr_scheduler  # noqa
import fairseq.pdb  # noqa
import fairseq.scoring  # noqa
import fairseq.tasks  # noqa
import fairseq.token_generation_constraints  # noqa

import fairseq.benchmark  # noqa
import fairseq.model_parallel  # noqa
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/__init__.py
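The sys.modules assignments in __init__.py above keep legacy import paths such as fairseq.meters working after the modules moved into subpackages. A toy illustration of the same aliasing trick with made-up module names:

# Alias an old dotted module path to a relocated module (toy names).
import sys
import types

pkg = types.ModuleType("pkg")
sys.modules["pkg"] = pkg

new_home = types.ModuleType("pkg.newplace")
new_home.answer = 42
sys.modules["pkg.newplace"] = new_home
sys.modules["pkg.oldplace"] = new_home   # old path now resolves to the new module

from pkg.oldplace import answer
print(answer)  # 42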
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import search, utils from fairseq.data import data_utils from fairseq.models import FairseqIncrementalDecoder from torch import Tensor from fairseq.ngram_repeat_block import NGramRepeatBlock class SequenceGenerator(nn.Module): def __init__( self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None, symbols_to_strip_from_output=None, lm_model=None, lm_weight=1.0, ): """Generates translations of a given source sentence. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models, currently support fairseq.models.TransformerModel for scripting beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) """ super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.tgt_dict = tgt_dict self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() if eos is None else eos self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.temperature = temperature self.match_source_len = match_source_len if no_repeat_ngram_size > 0: self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size) else: self.repeat_ngram_blocker = None assert temperature > 0, "--temperature must be greater than 0" self.search = ( search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy ) # We only need to set src_lengths in LengthConstrainedBeamSearch. # As a module attribute, setting it would break in multithread # settings when the model is shared. 
self.should_set_src_lengths = ( hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths ) self.model.eval() self.lm_model = lm_model self.lm_weight = lm_weight if self.lm_model is not None: self.lm_model.eval() def cuda(self): self.model.cuda() return self @torch.no_grad() def forward( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, bos_token: Optional[int] = None, ): """Generate a batch of translations. Args: sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, prefix_tokens, bos_token=bos_token) # TODO(myleott): unused, deprecate after pytorch-translate migration def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): """Iterate over a batched dataset and yield individual translations. Args: cuda (bool, optional): use GPU for generation timer (StopwatchMeter, optional): time generations """ for sample in data_itr: s = utils.move_to_cuda(sample) if cuda else sample if "net_input" not in s: continue input = s["net_input"] # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in input.items() if k != "prev_output_tokens" } if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if timer is not None: timer.stop(sum(len(h[0]["tokens"]) for h in hypos)) for i, id in enumerate(s["id"].data): # remove padding src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad) ref = ( utils.strip_pad(s["target"].data[i, :], self.pad) if s["target"] is not None else None ) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs): """Generate translations. Match the api of other fairseq generators. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens constraints (torch.LongTensor, optional): force decoder to include the list of constraints bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, **kwargs) def _generate( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, constraints: Optional[Tensor] = None, bos_token: Optional[int] = None, ): incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(self.model.models_size) ], ) net_input = sample["net_input"] if "src_tokens" in net_input: src_tokens = net_input["src_tokens"] # length of the source text being the character length except EndOfSentence and pad src_lengths = ( (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) ) elif "source" in net_input: src_tokens = net_input["source"] src_lengths = ( net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1) if net_input["padding_mask"] is not None else torch.tensor(src_tokens.size(-1)).to(src_tokens) ) else: raise Exception("expected src_tokens or source in net input") # bsz: total number of sentences in beam # Note that src_tokens may have more than 2 dimensions (i.e. 
audio features) bsz, src_len = src_tokens.size()[:2] beam_size = self.beam_size if constraints is not None and not self.search.supports_constraints: raise NotImplementedError( "Target-side constraints were provided, but search method doesn't support them" ) # Initialize constraints, when active self.search.init_constraints(constraints, beam_size) max_len: int = -1 if self.match_source_len: max_len = src_lengths.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), # exclude the EOS marker self.model.max_decoder_positions() - 1, ) assert ( self.min_len <= max_len ), "min_len cannot be larger than max_len, please adjust these!" # compute the encoder output for each beam encoder_outs = self.model.forward_encoder(net_input) # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order) # ensure encoder_outs is a List. assert encoder_outs is not None # initialize buffers scores = ( torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float() ) # +1 for eos; pad is never chosen for scoring tokens = ( torch.zeros(bsz * beam_size, max_len + 2) .to(src_tokens) .long() .fill_(self.pad) ) # +2 for eos and pad tokens[:, 0] = self.eos if bos_token is None else bos_token attn: Optional[Tensor] = None # A list that indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = ( torch.zeros(bsz, beam_size).to(src_tokens).eq(-1) ) # forward and backward-compatible False mask # list of completed sentences finalized = torch.jit.annotate( List[List[Dict[str, Tensor]]], [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)], ) # contains lists of dictionaries of infomation about the hypothesis being finalized at each step finished = [ False for i in range(bsz) ] # a boolean array indicating if the sentence at the index is finished or not num_remaining_sent = bsz # number of sentences remaining # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = ( (torch.arange(0, bsz) * beam_size) .unsqueeze(1) .type_as(tokens) .to(src_tokens.device) ) cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device) reorder_state: Optional[Tensor] = None batch_idxs: Optional[Tensor] = None original_batch_idxs: Optional[Tensor] = None if "id" in sample and isinstance(sample["id"], Tensor): original_batch_idxs = sample["id"] else: original_batch_idxs = torch.arange(0, bsz).type_as(tokens) for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as( batch_idxs ) reorder_state.view(-1, beam_size).add_( corr.unsqueeze(-1) * beam_size ) original_batch_idxs = original_batch_idxs[batch_idxs] self.model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = self.model.reorder_encoder_out( encoder_outs, reorder_state ) lprobs, avg_attn_scores = self.model.forward_decoder( 
tokens[:, : step + 1], encoder_outs, incremental_states, self.temperature, ) if self.lm_model is not None: lm_out = self.lm_model(tokens[:, : step + 1]) probs = self.lm_model.get_normalized_probs( lm_out, log_probs=True, sample=None ) probs = probs[:, -1, :] * self.lm_weight lprobs += probs lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs) lprobs[:, self.pad] = -math.inf # never select pad lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty # handle max length constraint if step >= max_len: lprobs[:, : self.eos] = -math.inf lprobs[:, self.eos + 1 :] = -math.inf # handle prefix tokens (possibly with different lengths) if ( prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len ): lprobs, tokens, scores = self._prefix_tokens( step, lprobs, scores, tokens, prefix_tokens, beam_size ) elif step < self.min_len: # minimum length constraint (does not apply if using prefix_tokens) lprobs[:, self.eos] = -math.inf # Record attention scores, only support avg_attn_scores is a Tensor if avg_attn_scores is not None: if attn is None: attn = torch.empty( bsz * beam_size, avg_attn_scores.size(1), max_len + 2 ).to(scores) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(lprobs) eos_bbsz_idx = torch.empty(0).to( tokens ) # indices of hypothesis ending with eos (finished sentences) eos_scores = torch.empty(0).to( scores ) # scores of hypothesis ending with eos (finished sentences) if self.should_set_src_lengths: self.search.set_src_lengths(src_lengths) if self.repeat_ngram_blocker is not None: lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step) # Shape: (batch, cand_size) cand_scores, cand_indices, cand_beams = self.search.step( step, lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], tokens[:, : step + 1], original_batch_idxs, ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos # Shape of eos_mask: (batch size, beam size) eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf) eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask) # only consider eos when it's among the top beam_size indices # Now we know what beam item(s) to finish # Shape: 1d list of absolute-numbered eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents: List[int] = [] if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = self.finalize_hypos( step, eos_bbsz_idx, eos_scores, tokens, scores, finalized, finished, beam_size, attn, src_lengths, max_len, ) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if self.search.stop_on_max_len and step >= max_len: break assert step < max_len, f"{step} < {max_len}" # Remove finalized sentences (ones for which {beam_size} # finished hypotheses have been generated) from the batch. 
if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = torch.ones( bsz, dtype=torch.bool, device=cand_indices.device ) batch_mask[finalized_sents] = False # TODO replace `nonzero(as_tuple=False)` after TorchScript supports it batch_idxs = torch.arange( bsz, device=cand_indices.device ).masked_select(batch_mask) # Choose the subset of the hypothesized constraints that will continue self.search.prune_sentences(batch_idxs) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) cand_scores = cand_scores[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths = src_lengths[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view( new_bsz * beam_size, attn.size(1), -1 ) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos hypos # and values < cand_size indicate candidate active hypos. # After, the min values per row are the top candidate active hypos # Rewrite the operator since the element wise or is not supported in torchscript. eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size])) active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just # the hypos with the smallest values in active_mask. # {active_hypos} indicates which {beam_size} hypotheses # from the list of {2 * beam_size} candidates were # selected. Shapes: (batch size, beam size) new_cands_to_ignore, active_hypos = torch.topk( active_mask, k=beam_size, dim=1, largest=False ) # update cands_to_ignore to ignore any finalized hypos. cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] # Make sure there is at least one active item for each sentence in the batch. assert (~cands_to_ignore).any(dim=1).all() # update cands_to_ignore to ignore any finalized hypos # {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam # can be selected more than once). 
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos) active_scores = torch.gather(cand_scores, dim=1, index=active_hypos) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses # Set the tokens for each beam (can select the same row more than once) tokens[:, : step + 1] = torch.index_select( tokens[:, : step + 1], dim=0, index=active_bbsz_idx ) # Select the next token for each of them tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather( cand_indices, dim=1, index=active_hypos ) if step > 0: scores[:, :step] = torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx ) scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather( cand_scores, dim=1, index=active_hypos ) # Update constraints based on which candidates were selected for the next beam self.search.update_constraints(active_hypos) # copy attention for active hypotheses if attn is not None: attn[:, :, : step + 2] = torch.index_select( attn[:, :, : step + 2], dim=0, index=active_bbsz_idx ) # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): scores = torch.tensor( [float(elem["score"].item()) for elem in finalized[sent]] ) _, sorted_scores_indices = torch.sort(scores, descending=True) finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices] finalized[sent] = torch.jit.annotate( List[Dict[str, Tensor]], finalized[sent] ) return finalized def _prefix_tokens( self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int ): """Handle prefix tokens""" prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1)) prefix_mask = prefix_toks.ne(self.pad) lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs) lprobs[prefix_mask] = lprobs[prefix_mask].scatter( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask] ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[ :, 0, 1 : step + 1 ] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() # copy tokens, scores and lprobs from the first beam to all beams tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size) scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size) lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size) return lprobs, tokens, scores def replicate_first_beam(self, tensor, mask, beam_size: int): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) def finalize_hypos( self, step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[str, Tensor]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int, ): """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly. A sentence is finalized when {beam_size} finished items have been collected for it. Returns number of sentences (not beam items) being finalized. These will be removed from the batch and not processed further. 
Args: bbsz_idx (Tensor): """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors. # tokens is (batch * beam, max_len). So the index_select # gets the newly EOS rows, then selects cols 1..{step + 2} tokens_clone = tokens.index_select(0, bbsz_idx)[ :, 1 : step + 2 ] # skip the first index, which is EOS tokens_clone[:, step] = self.eos attn_clone = ( attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2] if attn is not None else None ) # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: eos_scores /= (step + 1) ** self.len_penalty # cum_unfin records which sentences in the batch are finished. # It helps match indexing between (a) the original sentences # in the batch and (b) the current, possibly-reduced set of # sentences. cum_unfin: List[int] = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) # The keys here are of the form "{sent}_{unfin_idx}", where # "unfin_idx" is the index in the current (possibly reduced) # list of sentences, and "sent" is the index in the original, # unreduced batch # set() is not supported in script export sents_seen: Dict[str, Optional[Tensor]] = {} # For every finished beam item for i in range(bbsz_idx.size()[0]): idx = bbsz_idx[i] score = eos_scores[i] # sentence index in the current (possibly reduced) batch unfin_idx = idx // beam_size # sentence index in the original (unreduced) batch sent = unfin_idx + cum_unfin[unfin_idx] # Cannot create dict for key type '(int, int)' in torchscript. # The workaround is to cast int to string seen = str(sent.item()) + "_" + str(unfin_idx.item()) if seen not in sents_seen: sents_seen[seen] = None if self.match_source_len and step > src_lengths[unfin_idx]: score = torch.tensor(-math.inf).to(score) # An input sentence (among those in a batch) is finished when # beam_size hypotheses have been collected for it if len(finalized[sent]) < beam_size: if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i] else: hypo_attn = torch.empty(0) finalized[sent].append( { "tokens": tokens_clone[i], "score": score, "attention": hypo_attn, # src_len x tgt_len "alignment": torch.empty(0), "positional_scores": pos_scores[i], } ) newly_finished: List[int] = [] for seen in sents_seen.keys(): # check termination conditions for this sentence sent: int = int(float(seen.split("_")[0])) unfin_idx: int = int(float(seen.split("_")[1])) if not finished[sent] and self.is_finished( step, unfin_idx, max_len, len(finalized[sent]), beam_size ): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished def is_finished( self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int, ): """ Check whether decoding for a sentence is finished, which occurs when the list of finalized sentences has reached the beam size, or when we reach the maximum length. 
""" assert finalized_sent_len <= beam_size if finalized_sent_len == beam_size or step == max_len: return True return False class EnsembleModel(nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models_size = len(models) # method '__len__' is not supported in ModuleList for torch script self.single_model = models[0] self.models = nn.ModuleList(models) self.has_incremental: bool = False if all( hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder) for m in models ): self.has_incremental = True def forward(self): pass def has_encoder(self): return hasattr(self.single_model, "encoder") def has_incremental_states(self): return self.has_incremental def max_decoder_positions(self): return min([m.max_decoder_positions() for m in self.models]) @torch.jit.export def forward_encoder(self, net_input: Dict[str, Tensor]): if not self.has_encoder(): return None return [model.encoder.forward_torchscript(net_input) for model in self.models] @torch.jit.export def forward_decoder( self, tokens, encoder_outs: List[Dict[str, List[Tensor]]], incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], temperature: float = 1.0, ): log_probs = [] avg_attn: Optional[Tensor] = None encoder_out: Optional[Dict[str, List[Tensor]]] = None for i, model in enumerate(self.models): if self.has_encoder(): encoder_out = encoder_outs[i] # decode each model if self.has_incremental_states(): decoder_out = model.decoder.forward( tokens, encoder_out=encoder_out, incremental_state=incremental_states[i], ) else: decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out) attn: Optional[Tensor] = None decoder_len = len(decoder_out) if decoder_len > 1 and decoder_out[1] is not None: if isinstance(decoder_out[1], Tensor): attn = decoder_out[1] else: attn_holder = decoder_out[1]["attn"] if isinstance(attn_holder, Tensor): attn = attn_holder elif attn_holder is not None: attn = attn_holder[0] if attn is not None: attn = attn[:, -1, :] decoder_out_tuple = ( decoder_out[0][:, -1:, :].div_(temperature), None if decoder_len <= 1 else decoder_out[1], ) probs = model.get_normalized_probs( decoder_out_tuple, log_probs=True, sample=None ) probs = probs[:, -1, :] if self.models_size == 1: return probs, attn log_probs.append(probs) if attn is not None: if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log( self.models_size ) if avg_attn is not None: avg_attn.div_(self.models_size) return avg_probs, avg_attn @torch.jit.export def reorder_encoder_out( self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order ): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_outs: List[Dict[str, List[Tensor]]] = [] if not self.has_encoder(): return new_outs for i, model in enumerate(self.models): assert encoder_outs is not None new_outs.append( model.encoder.reorder_encoder_out(encoder_outs[i], new_order) ) return new_outs @torch.jit.export def reorder_incremental_state( self, incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): if not self.has_incremental_states(): return for i, model in enumerate(self.models): model.decoder.reorder_incremental_state_scripting( incremental_states[i], new_order ) class SequenceGeneratorWithAlignment(SequenceGenerator): def __init__( self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs ): """Generates translations of a given source sentence. Produces alignments following "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: left_pad_target (bool, optional): Whether or not the hypothesis should be left padded or not when they are teacher forced for generating alignments. """ super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs) self.left_pad_target = left_pad_target if print_alignment == "hard": self.extract_alignment = utils.extract_hard_alignment elif print_alignment == "soft": self.extract_alignment = utils.extract_soft_alignment @torch.no_grad() def generate(self, models, sample, **kwargs): finalized = super()._generate(sample, **kwargs) src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] beam_size = self.beam_size ( src_tokens, src_lengths, prev_output_tokens, tgt_tokens, ) = self._prepare_batch_for_alignment(sample, finalized) if any(getattr(m, "full_context_alignment", False) for m in self.model.models): attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens) else: attn = [ finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0) for i in range(bsz * beam_size) ] if src_tokens.device != "cpu": src_tokens = src_tokens.to("cpu") tgt_tokens = tgt_tokens.to("cpu") attn = [i.to("cpu") for i in attn] # Process the attn matrix to extract hard alignments. 
for i in range(bsz * beam_size): alignment = self.extract_alignment( attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos ) finalized[i // beam_size][i % beam_size]["alignment"] = alignment return finalized def _prepare_batch_for_alignment(self, sample, hypothesis): src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] src_tokens = ( src_tokens[:, None, :] .expand(-1, self.beam_size, -1) .contiguous() .view(bsz * self.beam_size, -1) ) src_lengths = sample["net_input"]["src_lengths"] src_lengths = ( src_lengths[:, None] .expand(-1, self.beam_size) .contiguous() .view(bsz * self.beam_size) ) prev_output_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True, ) tgt_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False, ) return src_tokens, src_lengths, prev_output_tokens, tgt_tokens class EnsembleModelWithAlignment(EnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) def forward_align(self, src_tokens, src_lengths, prev_output_tokens): avg_attn = None for model in self.models: decoder_out = model(src_tokens, src_lengths, prev_output_tokens) attn = decoder_out[1]["attn"][0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(self.models) > 1: avg_attn.div_(len(self.models)) return avg_attn
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/sequence_generator.py
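When a hypothesis is finalized, SequenceGenerator above divides its cumulative log-probability by (step + 1) ** len_penalty, so values above 1.0 favor longer outputs and values below 1.0 favor shorter ones. A tiny numeric sketch of that normalization with made-up scores:

# Length-normalized hypothesis scores under different length penalties.
import torch

cum_logprob = torch.tensor([-4.0, -6.0])   # two finished hypotheses
lengths = torch.tensor([4.0, 8.0])         # their lengths (step + 1)

for len_penalty in (0.5, 1.0, 2.0):
    score = cum_logprob / lengths ** len_penalty
    print(len_penalty, score)
# Raising len_penalty shifts the preference toward the longer hypothesis,
# lowering it below 1.0 shifts it toward the shorter one.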
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import multiprocessing
import os
import pdb
import sys


__all__ = ["set_trace"]


_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
    _stdin_fd = sys.stdin.fileno()
except Exception:
    _stdin_fd = None


class MultiprocessingPdb(pdb.Pdb):
    """A Pdb wrapper that works in a multiprocessing environment.

    Usage: `from fairseq import pdb; pdb.set_trace()`
    """

    def __init__(self):
        pdb.Pdb.__init__(self, nosigint=True)

    def _cmdloop(self):
        stdin_bak = sys.stdin
        with _stdin_lock:
            try:
                if _stdin_fd is not None:
                    if not _stdin[0]:
                        _stdin[0] = os.fdopen(_stdin_fd)
                    sys.stdin = _stdin[0]
                self.cmdloop()
            finally:
                sys.stdin = stdin_bak


def set_trace():
    pdb = MultiprocessingPdb()
    pdb.set_trace(sys._getframe().f_back)
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/pdb.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import re


SPACE_NORMALIZER = re.compile(r"\s+")


def tokenize_line(line):
    line = SPACE_NORMALIZER.sub(" ", line)
    line = line.strip()
    return line.split()
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/tokenizer.py
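tokenize_line above simply collapses runs of whitespace and splits on single spaces. A short usage sketch (the function is restated so the snippet runs on its own):

# Whitespace-normalizing tokenizer, as in fairseq/tokenizer.py above.
import re

SPACE_NORMALIZER = re.compile(r"\s+")

def tokenize_line(line):
    line = SPACE_NORMALIZER.sub(" ", line)
    line = line.strip()
    return line.split()

print(tokenize_line("  hello \t world \n"))  # ['hello', 'world']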
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import logging import os from typing import Any, Dict, Iterator, List import torch from fairseq import utils from fairseq.data import encoders from omegaconf import open_dict from torch import nn logger = logging.getLogger(__name__) def from_pretrained( model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", archive_map=None, **kwargs ): from fairseq import checkpoint_utils, file_utils if archive_map is not None: if model_name_or_path in archive_map: model_name_or_path = archive_map[model_name_or_path] if data_name_or_path is not None and data_name_or_path in archive_map: data_name_or_path = archive_map[data_name_or_path] # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe) # for each model if isinstance(model_name_or_path, dict): for k, v in model_name_or_path.items(): if k == "checkpoint_file": checkpoint_file = v elif ( k != "path" # only set kwargs that don't already have overrides and k not in kwargs ): kwargs[k] = v model_name_or_path = model_name_or_path["path"] model_path = file_utils.load_archive_file(model_name_or_path) # convenience hack for loading data and BPE codes from model archive if data_name_or_path.startswith("."): kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path)) else: kwargs["data"] = file_utils.load_archive_file(data_name_or_path) for file, arg in { "code": "bpe_codes", "bpecodes": "bpe_codes", "sentencepiece.bpe.model": "sentencepiece_model", "merges.txt": "bpe_merges", "vocab.json": "bpe_vocab", }.items(): path = os.path.join(model_path, file) if os.path.exists(path): kwargs[arg] = path if "user_dir" in kwargs: utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"])) models, args, task = checkpoint_utils.load_model_ensemble_and_task( [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)], arg_overrides=kwargs, ) return { "args": args, "task": task, "models": models, } class GeneratorHubInterface(nn.Module): """ PyTorch Hub interface for generating sequences from a pre-trained translation or language model. 
""" def __init__(self, cfg, task, models): super().__init__() self.cfg = cfg self.task = task self.models = nn.ModuleList(models) self.src_dict = task.source_dictionary self.tgt_dict = task.target_dictionary # optimize model for generation for model in self.models: model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) self.align_dict = utils.load_align_dict(cfg.generation.replace_unk) self.tokenizer = encoders.build_tokenizer(cfg.tokenizer) self.bpe = encoders.build_bpe(cfg.bpe) self.max_positions = utils.resolve_max_positions( self.task.max_positions(), *[model.max_positions() for model in models] ) # this is useful for determining the device self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def translate( self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs ) -> List[str]: return self.sample(sentences, beam, verbose, **kwargs) def sample( self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs ) -> List[str]: if isinstance(sentences, str): return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0] tokenized_sentences = [self.encode(sentence) for sentence in sentences] batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs) return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos] def score(self, sentences: List[str], **kwargs): if isinstance(sentences, str): return self.score([sentences], **kwargs)[0] # NOTE: this doesn't support translation tasks currently tokenized_sentences = [self.encode(sentence) for sentence in sentences] return [ hypos[0] for hypos in self.generate( tokenized_sentences, score_reference=True, **kwargs ) ] def generate( self, tokenized_sentences: List[torch.LongTensor], beam: int = 5, verbose: bool = False, skip_invalid_size_inputs=False, inference_step_args=None, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1: return self.generate( tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs )[0] # build generator using current args as well as any kwargs gen_args = copy.deepcopy(self.cfg.generation) with open_dict(gen_args): gen_args.beam = beam for k, v in kwargs.items(): setattr(gen_args, k, v) generator = self.task.build_generator(self.models, gen_args) inference_step_args = inference_step_args or {} results = [] for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs): batch = utils.apply_to_sample(lambda t: t.to(self.device), batch) translations = self.task.inference_step( generator, self.models, batch, **inference_step_args ) for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) # sort output to match input order outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] if verbose: def getarg(name, default): return getattr(gen_args, name, getattr(self.cfg, name, default)) for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs): src_str_with_unk = self.string(source_tokens) logger.info("S\t{}".format(src_str_with_unk)) for hypo in target_hypotheses: hypo_str = self.decode(hypo["tokens"]) logger.info("H\t{}\t{}".format(hypo["score"], hypo_str)) logger.info( "P\t{}".format( " ".join( map( lambda x: "{:.4f}".format(x), hypo["positional_scores"].tolist(), ) ) ) ) if hypo["alignment"] is not None and getarg( 
"print_alignment", False ): logger.info( "A\t{}".format( " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo["alignment"] ] ) ) ) return outputs def encode(self, sentence: str) -> torch.LongTensor: sentence = self.tokenize(sentence) sentence = self.apply_bpe(sentence) return self.binarize(sentence) def decode(self, tokens: torch.LongTensor) -> str: sentence = self.string(tokens) sentence = self.remove_bpe(sentence) return self.detokenize(sentence) def tokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.encode(sentence) return sentence def detokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.decode(sentence) return sentence def apply_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.encode(sentence) return sentence def remove_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.decode(sentence) return sentence def binarize(self, sentence: str) -> torch.LongTensor: return self.src_dict.encode_line(sentence, add_if_not_exist=False).long() def string(self, tokens: torch.LongTensor) -> str: return self.tgt_dict.string(tokens) def _build_batches( self, tokens: List[List[int]], skip_invalid_size_inputs: bool ) -> Iterator[Dict[str, Any]]: lengths = torch.LongTensor([t.numel() for t in tokens]) batch_iterator = self.task.get_batch_iterator( dataset=self.task.build_dataset_for_inference(tokens, lengths), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=self.max_positions, ignore_invalid_inputs=skip_invalid_size_inputs, disable_iterator_cache=True, ).next_epoch_itr(shuffle=False) return batch_iterator class BPEHubInterface(object): """PyTorch Hub interface for Byte-Pair Encoding (BPE).""" def __init__(self, bpe, **kwargs): super().__init__() args = argparse.Namespace(bpe=bpe, **kwargs) self.bpe = encoders.build_bpe(args) assert self.bpe is not None def encode(self, sentence: str) -> str: return self.bpe.encode(sentence) def decode(self, sentence: str) -> str: return self.bpe.decode(sentence) class TokenizerHubInterface(object): """PyTorch Hub interface for tokenization.""" def __init__(self, tokenizer, **kwargs): super().__init__() args = argparse.Namespace(tokenizer=tokenizer, **kwargs) self.tokenizer = encoders.build_tokenizer(args) assert self.tokenizer is not None def encode(self, sentence: str) -> str: return self.tokenizer.encode(sentence) def decode(self, sentence: str) -> str: return self.tokenizer.decode(sentence)
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/hub_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import torch from fairseq import utils class SequenceScorer(object): """Scores the target for a given source sentence.""" def __init__( self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None, symbols_to_strip_from_output=None, ): self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() if eos is None else eos self.softmax_batch = softmax_batch or sys.maxsize assert self.softmax_batch > 0 self.compute_alignment = compute_alignment self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) @torch.no_grad() def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample["net_input"] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample["target"] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model(**net_input) attn = decoder_out[1] if len(decoder_out) > 1 else None if type(attn) is dict: attn = attn.get("attn", None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample["target"] = tgt curr_prob = model.get_normalized_probs( bd, log_probs=len(models) == 1, sample=sample ).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs( curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt ) probs[idx:end] = tgt_probs.view(-1) idx = end sample["target"] = orig_target probs = probs.view(sample["target"].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None: if torch.is_tensor(attn): attn = attn.data else: attn = attn[0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = ( utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad) if sample["target"] is not None else None ) tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] if self.compute_alignment: alignment = utils.extract_hard_alignment( avg_attn_i, sample["net_input"]["src_tokens"][i], sample["target"][i], self.pad, self.eos, ) else: alignment = None else: avg_attn_i = alignment = None hypos.append( [ { "tokens": ref, "score": score_i, "attention": avg_attn_i, "alignment": alignment, "positional_scores": 
avg_probs_i, } ] ) return hypos
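# --- Usage sketch (not part of the original file) ---------------------------
# Toy illustration of the inputs SequenceScorer expects and the structure of
# the hypotheses it returns. `ToyDict` and `ToyModel` are hypothetical stand-ins
# for a real fairseq Dictionary and decoder model; only the small interface
# that SequenceScorer touches is mocked here.
if __name__ == "__main__":
    from torch import nn

    class ToyDict:
        def pad(self):
            return 1

        def eos(self):
            return 2

    class ToyModel(nn.Module):
        def __init__(self, vocab_size=10):
            super().__init__()
            self.vocab_size = vocab_size

        def forward(self, src_tokens, prev_output_tokens):
            bsz, tsz = prev_output_tokens.shape
            # Random "logits" over the vocabulary; a real model would decode here.
            return torch.randn(bsz, tsz, self.vocab_size), None

        def get_normalized_probs(self, net_output, log_probs, sample=None):
            fn = torch.log_softmax if log_probs else torch.softmax
            return fn(net_output[0], dim=-1)

    sample = {
        "net_input": {
            "src_tokens": torch.tensor([[4, 5, 6, 2]]),
            "prev_output_tokens": torch.tensor([[2, 7, 8, 9]]),
        },
        "target": torch.tensor([[7, 8, 9, 2]]),
    }
    scorer = SequenceScorer(ToyDict())
    hypos = scorer.generate([ToyModel()], sample)
    # Each entry holds the reference tokens, their average log-probability
    # ("score") and the per-token positional scores.
    print(hypos[0][0]["score"], hypos[0][0]["positional_scores"])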
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import uuid from typing import Dict, Optional from torch import Tensor class FairseqIncrementalState(object): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.init_incremental_state() def init_incremental_state(self): self._incremental_state_id = str(uuid.uuid4()) def _get_full_incremental_state_key(self, key: str) -> str: return "{}.{}".format(self._incremental_state_id, key) def get_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" full_key = self._get_full_incremental_state_key(key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = self._get_full_incremental_state_key(key) incremental_state[full_key] = value return incremental_state def with_incremental_state(cls): cls.__bases__ = (FairseqIncrementalState,) + tuple( b for b in cls.__bases__ if b != FairseqIncrementalState ) return cls
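# --- Usage sketch (not part of the original file) ---------------------------
# Small, self-contained illustration of the decorator above. `ToyCachingModule`
# is a hypothetical module that stores a tensor in the per-instance incremental
# state on each call and adds it back on the next call.
if __name__ == "__main__":
    import torch
    from torch import nn

    @with_incremental_state
    class ToyCachingModule(nn.Module):
        def forward(self, x, incremental_state=None):
            cached = self.get_incremental_state(incremental_state, "prev")
            if cached is not None and cached.get("prev_x") is not None:
                x = x + cached["prev_x"]
            if incremental_state is not None:
                self.set_incremental_state(incremental_state, "prev", {"prev_x": x})
            return x

    state: Dict[str, Dict[str, Optional[Tensor]]] = {}
    module = ToyCachingModule()
    print(module(torch.ones(2), incremental_state=state))  # tensor([1., 1.])
    print(module(torch.ones(2), incremental_state=state))  # tensor([2., 2.])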
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/incremental_decoding_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import contextlib import copy import importlib import logging import os import sys import tempfile import warnings from itertools import accumulate from typing import Callable, Dict, List, Optional import torch import torch.nn.functional as F from fairseq.modules.multihead_attention import MultiheadAttention from torch import Tensor try: from amp_C import multi_tensor_l2norm multi_tensor_l2norm_available = True except ImportError: multi_tensor_l2norm_available = False try: import torch_xla.core.xla_model as xm except ImportError: xm = None logger = logging.getLogger(__name__) MANIFOLD_PATH_SEP = "|" class FileContentsAction(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") super(FileContentsAction, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): from fairseq.file_io import PathManager if PathManager.isfile(values): with PathManager.open(values) as f: argument = f.read().strip() else: argument = values setattr(namespace, self.dest, argument) def split_paths(paths: str) -> List[str]: return ( paths.split(os.pathsep) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP) ) def load_ensemble_for_inference(filenames, task, model_arg_overrides=None): from fairseq import checkpoint_utils deprecation_warning( "utils.load_ensemble_for_inference is deprecated. " "Please use checkpoint_utils.load_model_ensemble instead." ) return checkpoint_utils.load_model_ensemble( filenames, arg_overrides=model_arg_overrides, task=task ) def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample) def move_to_cuda(sample, device=None): device = device or torch.cuda.current_device() def _move_to_cuda(tensor): # non_blocking is ignored if tensor is not pinned, so we can always set # to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620) return tensor.to(device=device, non_blocking=True) return apply_to_sample(_move_to_cuda, sample) def move_to_cpu(sample): def _move_to_cpu(tensor): # PyTorch has poor support for half tensors (float16) on CPU. # Move any such tensors to float32. 
if tensor.dtype in {torch.bfloat16, torch.float16}: tensor = tensor.to(dtype=torch.float32) return tensor.cpu() return apply_to_sample(_move_to_cpu, sample) def get_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" return module.get_incremental_state(incremental_state, key) def set_incremental_state( module: MultiheadAttention, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: result = module.set_incremental_state(incremental_state, key, value) if result is not None: incremental_state = result return incremental_state def load_align_dict(replace_unk): if replace_unk is None: align_dict = None elif isinstance(replace_unk, str) and len(replace_unk) > 0: # Load alignment dictionary for unknown word replacement if it was passed as an argument. align_dict = {} with open(replace_unk, "r") as f: for line in f: cols = line.split() align_dict[cols[0]] = cols[1] else: # No alignment dictionary provided but we still want to perform unknown word replacement by copying the # original source word. align_dict = {} return align_dict def print_embed_overlap(embed_dict, vocab_dict): embed_keys = set(embed_dict.keys()) vocab_keys = set(vocab_dict.symbols) overlap = len(embed_keys & vocab_keys) logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict))) def parse_embedding(embed_path): """Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 """ embed_dict = {} with open(embed_path) as f_embed: next(f_embed) # skip header for line in f_embed: pieces = line.rstrip().split(" ") embed_dict[pieces[0]] = torch.Tensor( [float(weight) for weight in pieces[1:]] ) return embed_dict def load_embedding(embed_dict, vocab, embedding): for idx in range(len(vocab)): token = vocab[idx] if token in embed_dict: embedding.weight.data[idx] = embed_dict[token] return embedding def replace_unk(hypo_str, src_str, alignment, align_dict, unk): from fairseq import tokenizer # Tokens are strings here hypo_tokens = tokenizer.tokenize_line(hypo_str) # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"] for i, ht in enumerate(hypo_tokens): if ht == unk: src_token = src_tokens[alignment[i]] # Either take the corresponding value in the aligned dictionary or just copy the original value. 
hypo_tokens[i] = align_dict.get(src_token, src_token) return " ".join(hypo_tokens) def post_process_prediction( hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None, ): hypo_str = tgt_dict.string( hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore ) if align_dict is not None: hypo_str = replace_unk( hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string() ) if align_dict is not None or remove_bpe is not None: # Convert back to tokens for evaluating with unk replacement or without BPE # Note that the dictionary can be modified inside the method. hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True) return hypo_tokens, hypo_str, alignment def make_positions(tensor, padding_idx: int, onnx_trace: bool = False): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def strip_pad(tensor, pad): return tensor[tensor.ne(pad)] def buffered_arange(max): if not hasattr(buffered_arange, "buf"): buffered_arange.buf = torch.LongTensor() if max > buffered_arange.buf.numel(): buffered_arange.buf.resize_(max) torch.arange(max, out=buffered_arange.buf) return buffered_arange.buf[:max] def convert_padding_direction( src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False ): assert right_to_left ^ left_to_right pad_mask = src_tokens.eq(padding_idx) if not pad_mask.any(): # no padding, return early return src_tokens if left_to_right and not pad_mask[:, 0].any(): # already right padded return src_tokens if right_to_left and not pad_mask[:, -1].any(): # already left padded return src_tokens max_len = src_tokens.size(1) buffered = torch.empty(0).long() if max_len > 0: torch.arange(max_len, out=buffered) range = buffered.type_as(src_tokens).expand_as(src_tokens) num_pads = pad_mask.long().sum(dim=1, keepdim=True) if right_to_left: index = torch.remainder(range - num_pads, max_len) else: index = torch.remainder(range + num_pads, max_len) return src_tokens.gather(1, index) def item(tensor): if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor: per_device_grads = {} norms = [] for grad in grads: device = grad.device cur_device_grads = per_device_grads.get(device) if cur_device_grads is None: cur_device_grads = [] per_device_grads[device] = cur_device_grads cur_device_grads.append(grad) for device in per_device_grads.keys(): cur_device_grads = per_device_grads[device] if device.type == "cuda": # TODO(msb) return has_inf has_inf = torch.zeros((1, 1), dtype=torch.int, device=device) with torch.cuda.device(device): norm = multi_tensor_l2norm( chunk_size, has_inf, [cur_device_grads], False ) norms.append(norm[0].to(torch.cuda.current_device())) else: norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads] total_norm = torch.norm(torch.stack(norms)) return total_norm @torch.no_grad() def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor: if isinstance(params, torch.Tensor): params = [params] params = 
list(params) grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)] if len(grads) == 0: if len(params) > 0: return params[0].new_tensor(0.0) else: return torch.tensor(0.0) if len(grads) == 1: total_norm = torch.norm(grads[0], p=2, dtype=torch.float32) else: if multi_tensor_l2norm_available: total_norm = multi_tensor_total_norm(grads) else: if torch.cuda.is_available(): warnings.warn( "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; " "you may get better performance by installing NVIDIA's apex library" ) device = torch.cuda.current_device() elif grads[0].device.type == "xla": device = grads[0].device else: device = torch.device("cpu") total_norm = torch.norm( torch.stack( [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads] ) ) if aggregate_norm_fn is not None: total_norm = aggregate_norm_fn(total_norm) if max_norm > 0: max_norm = float(max_norm) clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1) for g in grads: g.mul_(clip_coef) return total_norm def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float("-inf")).type_as(t) def _match_types(arg1, arg2): """Convert the numerical argument to the same type as the other argument""" def upgrade(arg_number, arg_structure): if isinstance(arg_structure, tuple): return tuple([arg_number] * len(arg_structure)) elif isinstance(arg_structure, dict): arg = copy.deepcopy(arg_structure) for k in arg: arg[k] = upgrade(arg_number, arg_structure[k]) return arg else: return arg_number if isinstance(arg1, float) or isinstance(arg1, int): return upgrade(arg1, arg2), arg2 elif isinstance(arg2, float) or isinstance(arg2, int): return arg1, upgrade(arg2, arg1) return arg1, arg2 def resolve_max_positions(*args): """Resolve max position constraints from multiple sources.""" def map_value_update(d1, d2): updated_value = copy.deepcopy(d1) for key in d2: if key not in updated_value: updated_value[key] = d2[key] else: updated_value[key] = min(d1[key], d2[key]) return updated_value def nullsafe_min(l): minim = None for item in l: if minim is None: minim = item elif item is not None and item < minim: minim = item return minim max_positions = None for arg in args: if max_positions is None: max_positions = arg elif arg is not None: max_positions, arg = _match_types(max_positions, arg) if isinstance(arg, float) or isinstance(arg, int): max_positions = min(max_positions, arg) elif isinstance(arg, dict): max_positions = map_value_update(max_positions, arg) else: max_positions = tuple(map(nullsafe_min, zip(max_positions, arg))) return max_positions def import_user_module(args): module_path = getattr(args, "user_dir", None) if module_path is not None: module_path = os.path.abspath(args.user_dir) if not os.path.exists(module_path) and not os.path.isfile(os.path.dirname(module_path)): fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: fairseq_rel_path = os.path.join( os.path.dirname(__file__), "..", args.user_dir ) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: raise FileNotFoundError(module_path) # ensure that user modules are only imported once import_user_module.memo = getattr(import_user_module, "memo", set()) if module_path not in import_user_module.memo: import_user_module.memo.add(module_path) module_parent, module_name = os.path.split(module_path) if module_name not in sys.modules: sys.path.insert(0, module_parent) 
importlib.import_module(module_name) else: raise ImportError( "Failed to import --user-dir={} because the corresponding module name " "({}) is not globally unique. Please rename the directory to " "something unique and try again.".format(module_path, module_name) ) def softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32) def log_softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.log_softmax(x.float(), dim=dim) else: return F.log_softmax(x, dim=dim, dtype=torch.float32) def get_perplexity(loss, round=2, base=2): from fairseq.logging.meters import safe_round if loss is None: return 0.0 try: return safe_round(base ** loss, round) except OverflowError: return float("inf") def deprecation_warning(message, stacklevel=3): # don't use DeprecationWarning, since it's ignored by default warnings.warn(message, stacklevel=stacklevel) def get_activation_fn(activation: str) -> Callable: """ Returns the activation function corresponding to `activation` """ from fairseq.modules import gelu, gelu_accurate if activation == "relu": return F.relu elif activation == "gelu": return gelu elif activation == "gelu_fast": deprecation_warning( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x else: raise RuntimeError("--activation-fn {} not supported".format(activation)) def get_available_activation_fns() -> List: return [ "relu", "gelu", "gelu_fast", # deprecated "gelu_accurate", "tanh", "linear", ] @contextlib.contextmanager def model_eval(model): is_training = model.training model.eval() yield model.train(is_training) def has_parameters(module): try: next(module.parameters()) return True except StopIteration: return False def get_rng_state(): state = {"torch_rng_state": torch.get_rng_state()} if xm is not None: state["xla_rng_state"] = xm.get_rng_state() if torch.cuda.is_available(): state["cuda_rng_state"] = torch.cuda.get_rng_state() return state def set_rng_state(state): torch.set_rng_state(state["torch_rng_state"]) if xm is not None: xm.set_rng_state(state["xla_rng_state"]) if torch.cuda.is_available(): torch.cuda.set_rng_state(state["cuda_rng_state"]) class set_torch_seed(object): def __init__(self, seed): assert isinstance(seed, int) self.rng_state = get_rng_state() torch.manual_seed(seed) if xm is not None: xm.set_rng_state(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) def __enter__(self): return self def __exit__(self, *exc): set_rng_state(self.rng_state) def parse_alignment(line): """ Parses a single line from the alingment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m). 
""" alignments = line.strip().split() parsed_alignment = torch.IntTensor(2 * len(alignments)) for idx, alignment in enumerate(alignments): src_idx, tgt_idx = alignment.split("-") parsed_alignment[2 * idx] = int(src_idx) parsed_alignment[2 * idx + 1] = int(tgt_idx) return parsed_alignment def get_token_to_word_mapping(tokens, exclude_list): n = len(tokens) word_start = [int(token not in exclude_list) for token in tokens] word_idx = list(accumulate(word_start)) token_to_word = {i: word_idx[i] for i in range(n)} return token_to_word def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_invalid = ( ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad]) tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad]) alignment = [] if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent): attn_valid = attn[tgt_valid] attn_valid[:, src_invalid] = float("-inf") _, src_indices = attn_valid.max(dim=1) for tgt_idx, src_idx in zip(tgt_valid, src_indices): alignment.append( ( src_token_to_word[src_idx.item()] - 1, tgt_token_to_word[tgt_idx.item()] - 1, ) ) return alignment def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad)).nonzero(as_tuple=False) ) src_valid = ( ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1) ) alignment = [] if len(tgt_valid) != 0 and len(src_valid) != 0: attn_valid = attn[tgt_valid, src_valid] alignment = [ ["{:.6f}".format(p) for p in src_probs.tolist()] for src_probs in attn_valid ] return alignment def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. 
""" if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def get_tpu_device(): return xm.xla_device() def tpu_data_loader(itr): import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl from fairseq.data import iterators xm.rendezvous("tpu_data_loader") # wait for all workers xm.mark_step() device = xm.xla_device() return iterators.CountingIterator( pl.ParallelLoader(itr, [device]).per_device_loader(device), start=getattr(itr, "n", 0), total=len(itr), ) class CudaEnvironment(object): def __init__(self): cur_device = torch.cuda.current_device() prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device)) self.name = prop.name self.major = prop.major self.minor = prop.minor self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024 @staticmethod def pretty_print_cuda_env_list(cuda_env_list): """ Given a list of CudaEnviorments, pretty print them """ num_workers = len(cuda_env_list) center = "CUDA enviroments for all {} workers".format(num_workers) banner_len = 40 - len(center) // 2 first_line = "*" * banner_len + center + "*" * banner_len logger.info(first_line) for r, env in enumerate(cuda_env_list): logger.info( "rank {:3d}: ".format(r) + "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor) + "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB) + "name = {:40s}".format(env.name) ) logger.info(first_line) def csv_str_list(x): return x.split(",") def eval_str_list(x, type=float): if x is None: return None if isinstance(x, str): x = eval(x) try: return list(map(type, x)) except TypeError: return [type(x)] def eval_str_dict(x, type=dict): if x is None: return None if isinstance(x, str): x = eval(x) return x def eval_bool(x, default=False): if x is None: return default try: return bool(eval(x)) except TypeError: return default
EXA-1-master
exa/models/unilm-master/decoding/GAD/fairseq/utils.py