Dataset columns:
    python_code  string, lengths 0 – 4.04M
    repo_name    string, lengths 7 – 58
    file_path    string, lengths 5 – 147
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys

from sacremoses.normalize import MosesPunctNormalizer


def main(args):
    normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
    for line in sys.stdin:
        print(normalizer.normalize(line.rstrip()), flush=True)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", "-l", default="en")
    parser.add_argument("--penn", "-p", action="store_true")
    args = parser.parse_args()
    main(args)
data2vec_vision-main
deltalm/src/examples/constrained_decoding/normalize.py
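A minimal sketch of the same normalization done directly from Python, assuming sacremoses is installed; the input string is illustrative:

# Hedged example: mirrors what normalize.py does for each stdin line.
from sacremoses.normalize import MosesPunctNormalizer

normalizer = MosesPunctNormalizer(lang="en", penn=False)
print(normalizer.normalize("«Hello»   ,  world !"))  # prints the punctuation-normalized line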
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys

import sacremoses


def main(args):
    """Tokenizes, preserving tabs"""
    mt = sacremoses.MosesTokenizer(lang=args.lang)

    def tok(s):
        return mt.tokenize(s, return_str=True)

    for line in sys.stdin:
        parts = list(map(tok, line.split("\t")))
        print(*parts, sep="\t", flush=True)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", "-l", default="en")
    parser.add_argument("--penn", "-p", action="store_true")
    parser.add_argument("--fields", "-f", help="fields to tokenize")
    args = parser.parse_args()
    main(args)
data2vec_vision-main
deltalm/src/examples/constrained_decoding/tok.py
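A small sketch of the tab-preserving tokenization used by tok.py, assuming sacremoses is installed; the input line is illustrative:

# Hedged example: tokenize each tab-separated field independently and keep the tabs.
import sacremoses

mt = sacremoses.MosesTokenizer(lang="en")
line = "Hello, world!\tIt's a test."
parts = [mt.tokenize(field, return_str=True) for field in line.split("\t")]
print(*parts, sep="\t")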
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import rxf_src  # noqa
data2vec_vision-main
deltalm/src/examples/rxf/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f  # noqa
data2vec_vision-main
deltalm/src/examples/rxf/rxf_src/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion("sentence_prediction_r3f") class SentencePredictionR3F(FairseqCriterion): def __init__( self, task, eps, r3f_lambda, noise_type, classification_head_name, regression_target, ): super().__init__(task) self.eps = eps self.r3f_lambda = r3f_lambda self.noise_type = noise_type self.classification_head_name = classification_head_name self.regression_target = regression_target if self.noise_type in {"normal"}: self.noise_sampler = torch.distributions.normal.Normal( loc=0.0, scale=self.eps ) elif self.noise_type == "uniform": self.noise_sampler = torch.distributions.uniform.Uniform( low=-self.eps, high=self.eps ) else: raise Exception(f"unrecognized noise type {self.noise_type}") @staticmethod def add_args(parser): # fmt: off parser.add_argument('--eps', type=float, default=1e-5, help='noise eps') parser.add_argument('--r3f-lambda', type=float, default=1.0, help='lambda for combining logistic loss and noisy KL loss') parser.add_argument('--noise-type', type=str, default='uniform', choices=['normal', 'uniform'], help='type of noises for RXF methods') parser.add_argument('--classification-head-name', default='sentence_classification_head', help='name of the classification head to use') # fmt: on def _get_symm_kl(self, noised_logits, input_logits): return ( F.kl_div( F.log_softmax(noised_logits, dim=-1, dtype=torch.float32), F.softmax(input_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) + F.kl_div( F.log_softmax(input_logits, dim=-1, dtype=torch.float32), F.softmax(noised_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) ) / noised_logits.size(0) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert ( hasattr(model, "classification_heads") and self.classification_head_name in model.classification_heads ), "model must provide sentence classification head for --criterion=sentence_prediction" token_embeddings = model.encoder.sentence_encoder.embed_tokens( sample["net_input"]["src_tokens"] ) input_logits, _ = model( **sample["net_input"], features_only=True, classification_head_name=self.classification_head_name, token_embeddings=token_embeddings, ) if model.training and self.noise_sampler: noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to( token_embeddings ) noised_embeddings = token_embeddings.detach().clone() + noise noised_logits, _ = model( **sample["net_input"], features_only=True, classification_head_name=self.classification_head_name, token_embeddings=noised_embeddings, ) symm_kl = self._get_symm_kl(noised_logits, input_logits) else: symm_kl = 0 targets = model.get_targets(sample, [input_logits]).view(-1) sample_size = targets.numel() if not self.regression_target: loss = F.nll_loss( F.log_softmax(input_logits, dim=-1, dtype=torch.float32), targets, reduction="sum", ) if model.training: symm_kl = symm_kl * sample_size loss = loss + self.r3f_lambda * symm_kl else: logits = input_logits.squeeze().float() targets = targets.float() loss = F.mse_loss(logits, targets, reduction="sum") logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample_size, "sample_size": sample_size, } if not self.regression_target: preds = input_logits.max(dim=1)[1] logging_output.update(ncorrect=(preds == targets).sum().item()) if model.training and self.noise_sampler: logging_output.update( symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data ) return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / sample_size / math.log(2), "symm_kl": symm_kl_sum / sample_size, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]: ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) agg_output.update(accuracy=ncorrect / nsentences) if sample_size != ntokens: agg_output["nll_loss"] = loss_sum / ntokens / math.log(2) return agg_output
data2vec_vision-main
deltalm/src/examples/rxf/rxf_src/sentence_prediction_r3f.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss @register_criterion("label_smoothed_cross_entropy_r3f") class LabelSmoothedCrossEntropyR3FCriterion(FairseqCriterion): def __init__( self, task, sentence_avg, label_smoothing, eps, r3f_lambda, noise_type ): super().__init__(task) self.sentence_avg = sentence_avg self.label_smoothing = label_smoothing self.eps = eps self.r3f_lambda = r3f_lambda self.noise_type = noise_type if self.noise_type in {"normal"}: self.noise_sampler = torch.distributions.normal.Normal( loc=0.0, scale=self.eps ) elif self.noise_type == "uniform": self.noise_sampler = torch.distributions.uniform.Uniform( low=-self.eps, high=self.eps ) else: raise Exception(f"unrecognized noise type {self.noise_type}") @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--label-smoothing', default=0., type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing') parser.add_argument('--eps', type=float, default=1e-5, help='noise eps') parser.add_argument('--r3f-lambda', type=float, default=1.0, help='lambda for combining logistic loss and noisy KL loss') parser.add_argument('--noise-type', type=str, default='normal', choices=['normal', 'uniform'], help='type of noises') # fmt: on def _get_symm_kl(self, noised_logits, input_logits): return ( F.kl_div( F.log_softmax(noised_logits, dim=-1, dtype=torch.float32), F.softmax(input_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) + F.kl_div( F.log_softmax(input_logits, dim=-1, dtype=torch.float32), F.softmax(noised_logits, dim=-1, dtype=torch.float32), None, None, "sum", ) ) / noised_logits.size(0) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ token_embeddings = model.encoder.embed_tokens(sample["net_input"]["src_tokens"]) input_logits, extra = model(**sample["net_input"]) loss, nll_loss = self.compute_loss( model, (input_logits, extra), sample, reduce=reduce ) sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) if model.training: noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to( token_embeddings ) noised_embeddings = token_embeddings.clone() + noise noised_logits, _ = model( **sample["net_input"], token_embeddings=noised_embeddings ) symm_kl = self._get_symm_kl(noised_logits, input_logits) if model.training: symm_kl = symm_kl * sample_size loss = loss + self.r3f_lambda * symm_kl logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } if model.training: logging_output.update( symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data ) return loss, sample_size, logging_output def compute_loss(self, model, net_output, sample, reduce=True): lprobs = model.get_normalized_probs(net_output, log_probs=True) lprobs = lprobs.view(-1, lprobs.size(-1)) target = model.get_targets(sample, net_output).view(-1, 1) loss, nll_loss = label_smoothed_nll_loss( lprobs, target, self.label_smoothing, ignore_index=self.padding_idx, reduce=reduce, ) return loss, nll_loss @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs) metrics.log_scalar("symm_kl", symm_kl_sum / sample_size, sample_size, round=3) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
data2vec_vision-main
deltalm/src/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import transformer_xl_model, truncated_bptt_lm_task  # noqa
data2vec_vision-main
deltalm/src/examples/truncated_bptt/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from dataclasses import dataclass, field from typing import List, Optional, Tuple import torch from fairseq import distributed_utils as dist_utils, utils from fairseq.data import Dictionary, TokenBlockDataset, data_utils, iterators from fairseq.dataclass import FairseqDataclass from fairseq.tasks import FairseqTask, register_task from omegaconf import II logger = logging.getLogger(__name__) @dataclass class TruncatedBPTTLMConfig(FairseqDataclass): data: str = field(default="???", metadata={"help": "path to data directory"}) tokens_per_sample: int = field( default=1024, metadata={"help": "max number of tokens per sequence"}, ) batch_size: int = II("dataset.batch_size") # Some models use *max_target_positions* to know how many positional # embeddings to learn. We use II(...) to make it default to # *tokens_per_sample*, but in principle there could be more positional # embeddings than tokens in a single batch. This may also be irrelevant for # custom model implementations. max_target_positions: int = II("task.tokens_per_sample") # these will be populated automatically if not provided data_parallel_rank: Optional[int] = None data_parallel_size: Optional[int] = None @register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig) class TruncatedBPTTLMTask(FairseqTask): def __init__(self, cfg: TruncatedBPTTLMConfig): super().__init__(cfg) if cfg.data_parallel_rank is None or cfg.data_parallel_size is None: if torch.distributed.is_initialized(): cfg.data_parallel_rank = dist_utils.get_data_parallel_rank() cfg.data_parallel_size = dist_utils.get_data_parallel_world_size() else: cfg.data_parallel_rank = 0 cfg.data_parallel_size = 1 # load the dictionary paths = utils.split_paths(cfg.data) assert len(paths) > 0 self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(self.dictionary))) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split (e.g., train, valid, test)""" # support sharded datasets paths = utils.split_paths(self.cfg.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] split_path = os.path.join(data_path, split) # each element of *data* will be a tensorized line from the original # text dataset, similar to ``open(split_path).readlines()`` data = data_utils.load_indexed_dataset( split_path, self.dictionary, combine=combine ) if data is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) # this is similar to ``data.view(-1).split(tokens_per_sample)`` data = TokenBlockDataset( data, data.sizes, block_size=self.cfg.tokens_per_sample, pad=None, # unused eos=None, # unused break_mode="none", ) self.datasets[split] = TruncatedBPTTDataset( data=data, bsz_per_shard=self.cfg.batch_size, shard_id=self.cfg.data_parallel_rank, num_shards=self.cfg.data_parallel_size, ) def dataset(self, split): return self.datasets[split] def get_batch_iterator( self, dataset, num_workers=0, epoch=1, data_buffer_size=0, **kwargs ): return iterators.EpochBatchIterator( dataset=dataset, collate_fn=self._collate_fn, num_workers=num_workers, epoch=epoch, buffer_size=data_buffer_size, # we don't use the batching functionality from EpochBatchIterator; # instead every item in *dataset* is a whole batch batch_sampler=[[i] for i in range(len(dataset))], 
disable_shuffling=True, ) def _collate_fn(self, items: List[List[torch.Tensor]]): # we don't use fairseq's batching functionality, so we expect a single # Tensor of type List[torch.Tensor] assert len(items) == 1 # item will have shape B x T (the last batch may have length < T) id, item = items[0] item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad()) B, T = item.size() # shift item one position over and append a padding token for the target target = torch.nn.functional.pad( item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad() ) # fairseq expects batches to have the following structure return { "id": torch.tensor([id]*item.size(0)), "net_input": { "src_tokens": item, }, "target": target, "nsentences": item.size(0), "ntokens": item.numel(), } def build_dataset_for_inference( self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs ) -> torch.utils.data.Dataset: eos = self.source_dictionary.eos() dataset = TokenBlockDataset( src_tokens, src_lengths, block_size=None, # ignored for "eos" break mode pad=self.source_dictionary.pad(), eos=eos, break_mode="eos", ) class Dataset(torch.utils.data.Dataset): def __getitem__(self, i): item = dataset[i] if item[-1] == eos: # remove eos to support generating with a prefix item = item[:-1] return (i, [item]) def __len__(self): return len(dataset) return Dataset() def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): if constraints is not None: raise NotImplementedError # SequenceGenerator doesn't use *src_tokens* directly, we need to # pass the *prefix_tokens* argument instead. if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement(): prefix_tokens = sample["net_input"]["src_tokens"] # begin generation with the end-of-sentence token bos_token = self.source_dictionary.eos() return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class TruncatedBPTTDataset(torch.utils.data.Dataset): def __init__( self, data: List[torch.Tensor], # ordered list of items bsz_per_shard, # number of items processed per GPUs per forward shard_id, # current GPU ID num_shards, # number of GPUs ): super().__init__() self.data = data def batchify(data, bsz): # Work out how cleanly we can divide the dataset into bsz parts. nbatch = data.size(0) // bsz # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data.narrow(0, 0, nbatch * bsz) # Evenly divide the data across the bsz batches. data = data.view(bsz, -1).contiguous() return data # total number of sequences processed by all GPUs in each forward pass global_batch_size = bsz_per_shard * num_shards """ With a 16 item dataset, bsz_per_shard=2 and num_shards=3, *indices* might look like: indices = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]] The size of the TruncatedBPTTDataset instance will be 2, and shard 1 will see items: [(0, [data[4], data[6]]), (1, [data[5], data[7]])] """ indices = batchify(torch.arange(len(data)), global_batch_size) assert indices.size(0) == global_batch_size self.my_indices = indices[ shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard ] assert self.my_indices.size(0) == bsz_per_shard def __len__(self): return self.my_indices.size(1) def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]: return (i, [self.data[idx] for idx in self.my_indices[:, i]])
data2vec_vision-main
deltalm/src/examples/truncated_bptt/truncated_bptt_lm_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass, field from typing import Dict, List, Optional import torch from fairseq.dataclass import FairseqDataclass from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, ) from fairseq.modules.checkpoint_activations import checkpoint_wrapper from omegaconf import II logger = logging.getLogger(__name__) @dataclass class TransformerXLConfig(FairseqDataclass): # defaults come from the original Transformer-XL code cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000]) d_model: int = 500 n_head: int = 10 d_head: int = 50 d_inner: int = 1000 div_val: int = 1 n_layer: int = 12 mem_len: int = 0 clamp_len: int = -1 same_length: bool = False dropout: float = 0.0 dropatt: float = 0.0 checkpoint_activations: bool = False max_target_positions: int = II("task.max_target_positions") @register_model("transformer_xl", dataclass=TransformerXLConfig) class TransformerXLLanguageModel(FairseqLanguageModel): @classmethod def build_model(cls, cfg: TransformerXLConfig, task): return cls(TransformerXLDecoder(cfg, task)) class TransformerXLDecoder(FairseqIncrementalDecoder): def __init__(self, cfg, task): from transformers.configuration_transfo_xl import TransfoXLConfig from transformers.modeling_transfo_xl import TransfoXLLMHeadModel super().__init__(task.target_dictionary) self.cfg = cfg # remove any cutoffs larger than the vocab size cutoffs = [ cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary) ] config = TransfoXLConfig( vocab_size=len(task.target_dictionary), cutoffs=cutoffs, d_model=cfg.d_model, d_embed=cfg.d_model, n_head=cfg.n_head, d_head=cfg.d_head, d_inner=cfg.d_inner, div_val=cfg.div_val, n_layer=cfg.n_layer, mem_len=cfg.mem_len, clamp_len=cfg.clamp_len, same_length=cfg.same_length, dropout=cfg.dropout, dropatt=cfg.dropatt, ) logger.info(config) self.model = TransfoXLLMHeadModel(config) # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax`` # which adds ``None`` values to an ``nn.ParameterList``, which is not # supported in PyTorch. Instead we can replace this with an # ``nn.ModuleList``, which does support ``None`` values. 
try: if all(p is None for p in self.model.crit.out_projs._parameters.values()): self.model.crit.out_projs = torch.nn.ModuleList( [None] * len(self.model.crit.out_projs._parameters) ) except Exception: pass if cfg.checkpoint_activations: for i in range(len(self.model.transformer.layers)): self.model.transformer.layers[i] = checkpoint_wrapper( self.model.transformer.layers[i] ) self._mems = None def forward( self, src_tokens, src_lengths=None, # unused incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, encoder_out=None, ): if incremental_state is not None: # used during inference mems = self.get_incremental_state(incremental_state, "mems") src_tokens = src_tokens[:, -1:] # only keep the most recent token else: mems = self._mems output = self.model( input_ids=src_tokens, mems=mems, return_dict=False, ) if len(output) >= 2: if incremental_state is not None: self.set_incremental_state(incremental_state, "mems", output[1]) else: self._mems = output[1] return (output[0],) def max_positions(self): return self.cfg.max_target_positions def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], new_order: torch.Tensor, ): """Reorder incremental state. This will be called when the order of the input has changed from the previous time step. A typical use case is beam search, where the input order changes between time steps based on the selection of beams. """ mems = self.get_incremental_state(incremental_state, "mems") if mems is not None: new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] self.set_incremental_state(incremental_state, "mems", new_mems)
data2vec_vision-main
deltalm/src/examples/truncated_bptt/transformer_xl_model.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to pre-compute embeddings for a wav2letter++ dataset """ import argparse import glob import os from shutil import copy import h5py import numpy as np import soundfile as sf import torch import tqdm import fairseq from torch import nn def read_audio(fname): """ Load an audio file and return PCM along with the sample rate """ wav, sr = sf.read(fname) assert sr == 16e3 return wav, 16e3 class PretrainedWav2VecModel(nn.Module): def __init__(self, fname): super().__init__() model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname]) model = model[0] model.eval() self.model = model def forward(self, x): with torch.no_grad(): z = self.model.feature_extractor(x) if isinstance(z, tuple): z = z[0] c = self.model.feature_aggregator(z) return z, c class EmbeddingWriterConfig(argparse.ArgumentParser): def __init__(self): super().__init__("Pre-compute embeddings for wav2letter++ datasets") kwargs = {"action": "store", "type": str, "required": True} self.add_argument("--input", "-i", help="Input Directory", **kwargs) self.add_argument("--output", "-o", help="Output Directory", **kwargs) self.add_argument("--model", help="Path to model checkpoint", **kwargs) self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs) self.add_argument( "--ext", default="wav", required=False, help="Audio file extension" ) self.add_argument( "--no-copy-labels", action="store_true", help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.", ) self.add_argument( "--use-feat", action="store_true", help="Use the feature vector ('z') instead of context vector ('c') for features", ) self.add_argument("--gpu", help="GPU to use", default=0, type=int) class Prediction: """ Lightweight wrapper around a fairspeech embedding model """ def __init__(self, fname, gpu=0): self.gpu = gpu self.model = PretrainedWav2VecModel(fname).cuda(gpu) def __call__(self, x): x = torch.from_numpy(x).float().cuda(self.gpu) with torch.no_grad(): z, c = self.model(x.unsqueeze(0)) return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy() class H5Writer: """ Write features as hdf5 file in wav2letter++ compatible format """ def __init__(self, fname): self.fname = fname os.makedirs(os.path.dirname(self.fname), exist_ok=True) def write(self, data): channel, T = data.shape with h5py.File(self.fname, "w") as out_ds: data = data.T.flatten() out_ds["features"] = data out_ds["info"] = np.array([16e3 // 160, T, channel]) class EmbeddingDatasetWriter(object): """Given a model and a wav2letter++ dataset, pre-compute and store embeddings Args: input_root, str : Path to the wav2letter++ dataset output_root, str : Desired output directory. 
Will be created if non-existent split, str : Dataset split """ def __init__( self, input_root, output_root, split, model_fname, extension="wav", gpu=0, verbose=False, use_feat=False, ): assert os.path.exists(model_fname) self.model_fname = model_fname self.model = Prediction(self.model_fname, gpu) self.input_root = input_root self.output_root = output_root self.split = split self.verbose = verbose self.extension = extension self.use_feat = use_feat assert os.path.exists(self.input_path), "Input path '{}' does not exist".format( self.input_path ) def _progress(self, iterable, **kwargs): if self.verbose: return tqdm.tqdm(iterable, **kwargs) return iterable def require_output_path(self, fname=None): path = self.get_output_path(fname) os.makedirs(path, exist_ok=True) @property def input_path(self): return self.get_input_path() @property def output_path(self): return self.get_output_path() def get_input_path(self, fname=None): if fname is None: return os.path.join(self.input_root, self.split) return os.path.join(self.get_input_path(), fname) def get_output_path(self, fname=None): if fname is None: return os.path.join(self.output_root, self.split) return os.path.join(self.get_output_path(), fname) def copy_labels(self): self.require_output_path() labels = list( filter( lambda x: self.extension not in x, glob.glob(self.get_input_path("*")) ) ) for fname in tqdm.tqdm(labels): copy(fname, self.output_path) @property def input_fnames(self): return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension)))) def __len__(self): return len(self.input_fnames) def write_features(self): paths = self.input_fnames fnames_context = map( lambda x: os.path.join( self.output_path, x.replace("." + self.extension, ".h5context") ), map(os.path.basename, paths), ) for name, target_fname in self._progress( zip(paths, fnames_context), total=len(self) ): wav, sr = read_audio(name) z, c = self.model(wav) feat = z if self.use_feat else c writer = H5Writer(target_fname) writer.write(feat) def __repr__(self): return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format( n_files=len(self), **self.__dict__ ) if __name__ == "__main__": args = EmbeddingWriterConfig().parse_args() for split in args.split: writer = EmbeddingDatasetWriter( input_root=args.input, output_root=args.output, split=split, model_fname=args.model, gpu=args.gpu, extension=args.ext, use_feat=args.use_feat, ) print(writer) writer.require_output_path() print("Writing Features...") writer.write_features() print("Done.") if not args.no_copy_labels: print("Copying label data...") writer.copy_labels() print("Done.")
data2vec_vision-main
deltalm/src/examples/wav2vec/wav2vec_featurize.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""

import argparse
import glob
import os
import random

import soundfile


def get_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "root", metavar="DIR", help="root directory containing flac files to index"
    )
    parser.add_argument(
        "--valid-percent",
        default=0.01,
        type=float,
        metavar="D",
        help="percentage of data to use as validation set (between 0 and 1)",
    )
    parser.add_argument(
        "--dest", default=".", type=str, metavar="DIR", help="output directory"
    )
    parser.add_argument(
        "--ext", default="flac", type=str, metavar="EXT", help="extension to look for"
    )
    parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
    parser.add_argument(
        "--path-must-contain",
        default=None,
        type=str,
        metavar="FRAG",
        help="if set, path must contain this substring for a file to be included in the manifest",
    )
    return parser


def main(args):
    assert args.valid_percent >= 0 and args.valid_percent <= 1.0

    if not os.path.exists(args.dest):
        os.makedirs(args.dest)

    dir_path = os.path.realpath(args.root)
    search_path = os.path.join(dir_path, "**/*." + args.ext)
    rand = random.Random(args.seed)

    with open(os.path.join(args.dest, "train.tsv"), "w") as train_f, open(
        os.path.join(args.dest, "valid.tsv"), "w"
    ) as valid_f:
        print(dir_path, file=train_f)
        print(dir_path, file=valid_f)

        for fname in glob.iglob(search_path, recursive=True):
            file_path = os.path.realpath(fname)

            if args.path_must_contain and args.path_must_contain not in file_path:
                continue

            frames = soundfile.info(fname).frames
            dest = train_f if rand.random() > args.valid_percent else valid_f
            print(
                "{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest
            )


if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    main(args)
data2vec_vision-main
deltalm/src/examples/wav2vec/wav2vec_manifest.py
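The script above writes train.tsv/valid.tsv manifests whose first line is the audio root directory and whose remaining lines are "<relative path>\t<num frames>". A small sketch of reading such a manifest back; the path is illustrative:

# Hedged example: parse a manifest produced by wav2vec_manifest.py.
def read_manifest(path):
    with open(path) as f:
        root = next(f).strip()  # first line: root directory
        items = []
        for line in f:
            rel_path, frames = line.rstrip("\n").split("\t")
            items.append((rel_path, int(frames)))
    return root, items

# root, items = read_manifest("manifests/train.tsv")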
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""

import argparse
import os


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("tsv")
    parser.add_argument("--output-dir", required=True)
    parser.add_argument("--output-name", required=True)
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    transcriptions = {}

    with open(args.tsv, "r") as tsv, open(
        os.path.join(args.output_dir, args.output_name + ".ltr"), "w"
    ) as ltr_out, open(
        os.path.join(args.output_dir, args.output_name + ".wrd"), "w"
    ) as wrd_out:
        root = next(tsv).strip()
        for line in tsv:
            line = line.strip()
            dir = os.path.dirname(line)
            if dir not in transcriptions:
                parts = dir.split(os.path.sep)
                trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
                path = os.path.join(root, dir, trans_path)
                assert os.path.exists(path)
                texts = {}
                with open(path, "r") as trans_f:
                    for tline in trans_f:
                        items = tline.strip().split()
                        texts[items[0]] = " ".join(items[1:])
                transcriptions[dir] = texts
            part = os.path.basename(line).split(".")[0]
            assert part in transcriptions[dir]
            print(transcriptions[dir][part], file=wrd_out)
            print(
                " ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
                file=ltr_out,
            )


if __name__ == "__main__":
    main()
data2vec_vision-main
deltalm/src/examples/wav2vec/libri_labels.py
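libri_labels.py writes two files per split: <name>.wrd with the word-level transcript and <name>.ltr with the letter-level transcript (spaces replaced by "|", characters space-separated, with a trailing "|"). A small sketch of that letter conversion; the transcript is illustrative:

# Hedged example: word transcript -> wav2letter-style letter transcript.
def to_letters(transcript: str) -> str:
    return " ".join(list(transcript.replace(" ", "|"))) + " |"

print(to_letters("HELLO WORLD"))  # "H E L L O | W O R L D |"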
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to pre-compute embeddings for a wav2letter++ dataset """ import argparse import glob import os import os.path as osp import pprint import soundfile as sf import torch import fairseq from torch import nn from torch.utils.data import DataLoader try: import tqdm except: print("Install tqdm to use --log-format=tqdm") class FilesDataset: def __init__(self, files, labels): self.files = files if labels and osp.exists(labels): with open(labels, "r") as lbl_f: self.labels = [line.rstrip() for line in lbl_f] else: self.labels = labels def __len__(self): return len(self.files) def __getitem__(self, index): fname = self.files[index] wav, sr = sf.read(fname) assert sr == 16000 wav = torch.from_numpy(wav).float() lbls = None if self.labels: if isinstance(self.labels, str): lbl_file = osp.splitext(fname)[0] + "." + self.labels with open(lbl_file, "r") as lblf: lbls = lblf.readline() assert lbls is not None else: lbls = self.labels[index] return wav, lbls def collate(self, batch): return batch class ArgTypes: @staticmethod def existing_path(arg): arg = str(arg) assert osp.exists(arg), f"File {arg} does not exist" return arg @staticmethod def mkdir(arg): arg = str(arg) os.makedirs(arg, exist_ok=True) return arg class DatasetWriter: def __init__(self): self.args = self.load_config() pprint.pprint(self.args.__dict__) self.model = self.load_model() def __getattr__(self, attr): return getattr(self.args, attr) def read_manifest(self, fname): with open(fname, "r") as fp: lines = fp.read().split("\n") root = lines.pop(0).strip() fnames = [ osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0 ] return fnames def process_splits(self): if self.args.shard is not None or self.args.num_shards is not None: assert self.args.shard is not None and self.args.num_shards is not None for split in self.splits: print(split) if self.extension == "tsv": datadir = osp.join(self.data_dir, f"{split}.{self.extension}") print("Reading manifest file: ", datadir) files = self.read_manifest(datadir) else: datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}") files = glob.glob(datadir, recursive=True) assert len(files) > 0 if self.args.shard is not None: files = files[self.args.shard :: self.args.num_shards] lbls = [] with open(self.data_file(split), "w") as srcf: for line, lbl in self.iterate(files): print(line, file=srcf) if self.args.labels: lbls.append(lbl + "\n") if self.args.labels: assert all(a is not None for a in lbls) with open(self.lbl_file(split), "w") as lblf: lblf.writelines(lbls) def iterate(self, files): data = self.load_data(files) for samples in tqdm.tqdm(data, total=len(files) // 32): for wav, lbl in samples: x = wav.unsqueeze(0).float().cuda() div = 1 while x.size(-1) // div > self.args.max_size: div += 1 xs = x.chunk(div, dim=-1) result = [] for x in xs: torch.cuda.empty_cache() x = self.model.feature_extractor(x) if self.quantize_location == "encoder": with torch.no_grad(): _, idx = self.model.vector_quantizer.forward_idx(x) idx = idx.squeeze(0).cpu() else: with torch.no_grad(): z = self.model.feature_aggregator(x) _, idx = self.model.vector_quantizer.forward_idx(z) idx = idx.squeeze(0).cpu() result.append(idx) idx = torch.cat(result, dim=0) yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl def lbl_file(self, name): shard_part = "" if self.args.shard is None 
else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.lbl{shard_part}") def data_file(self, name): shard_part = "" if self.args.shard is None else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.src{shard_part}") def var_file(self): return osp.join(self.output_dir, f"vars.pt") def load_config(self): parser = argparse.ArgumentParser("Vector Quantized wav2vec features") # Model Arguments parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True) parser.add_argument("--data-parallel", action="store_true") # Output Arguments parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True) # Data Arguments parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True) parser.add_argument("--splits", type=str, nargs="+", required=True) parser.add_argument("--extension", type=str, required=True) parser.add_argument("--labels", type=str, required=False) parser.add_argument("--shard", type=int, default=None) parser.add_argument("--num-shards", type=int, default=None) parser.add_argument("--max-size", type=int, default=1300000) # Logger Arguments parser.add_argument( "--log-format", type=str, choices=["none", "simple", "tqdm"] ) return parser.parse_args() def load_data(self, fnames): dataset = FilesDataset(fnames, self.args.labels) loader = DataLoader( dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8 ) return loader def load_model(self): model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint]) model = model[0] self.quantize_location = getattr(cfg.model, "vq", "encoder") model.eval().float() model.cuda() if self.data_parallel: model = nn.DataParallel(model) return model def __call__(self): self.process_splits() if hasattr(self.model.feature_extractor, "vars") and ( self.args.shard is None or self.args.shard == 0 ): vars = ( self.model.feature_extractor.vars.view( self.model.feature_extractor.banks, self.model.feature_extractor.num_vars, -1, ) .cpu() .detach() ) print("writing learned latent variable embeddings: ", vars.shape) torch.save(vars, self.var_file()) if __name__ == "__main__": write_data = DatasetWriter() write_data() print("Done.")
data2vec_vision-main
deltalm/src/examples/wav2vec/vq-wav2vec_featurize.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import contextlib import sys from collections import Counter from multiprocessing import Pool from fairseq.data.encoders.gpt2_bpe import get_encoder def main(): """ Helper script to encode raw text with the GPT-2 BPE using multiple processes. The encoder.json and vocab.bpe files can be obtained here: - https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json - https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe """ parser = argparse.ArgumentParser() parser.add_argument( "--encoder-json", help="path to encoder.json", ) parser.add_argument( "--vocab-bpe", type=str, help="path to vocab.bpe", ) parser.add_argument( "--inputs", nargs="+", default=["-"], help="input files to filter/encode", ) parser.add_argument( "--outputs", nargs="+", default=["-"], help="path to save encoded outputs", ) parser.add_argument( "--keep-empty", action="store_true", help="keep empty lines", ) parser.add_argument("--workers", type=int, default=20) args = parser.parse_args() assert len(args.inputs) == len( args.outputs ), "number of input and output paths should match" with contextlib.ExitStack() as stack: inputs = [ stack.enter_context(open(input, "r", encoding="utf-8")) if input != "-" else sys.stdin for input in args.inputs ] outputs = [ stack.enter_context(open(output, "w", encoding="utf-8")) if output != "-" else sys.stdout for output in args.outputs ] encoder = MultiprocessingEncoder(args) pool = Pool(args.workers, initializer=encoder.initializer) encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100) stats = Counter() for i, (filt, enc_lines) in enumerate(encoded_lines, start=1): if filt == "PASS": for enc_line, output_h in zip(enc_lines, outputs): print(enc_line, file=output_h) else: stats["num_filtered_" + filt] += 1 if i % 10000 == 0: print("processed {} lines".format(i), file=sys.stderr) for k, v in stats.most_common(): print("[{}] filtered {} lines".format(k, v), file=sys.stderr) class MultiprocessingEncoder(object): def __init__(self, args): self.args = args def initializer(self): global bpe bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe) def encode(self, line): global bpe ids = bpe.encode(line) return list(map(str, ids)) def decode(self, tokens): global bpe return bpe.decode(tokens) def encode_lines(self, lines): """ Encode a set of lines. All lines will be encoded together. """ enc_lines = [] for line in lines: line = line.strip() if len(line) == 0 and not self.args.keep_empty: return ["EMPTY", None] tokens = self.encode(line) enc_lines.append(" ".join(tokens)) return ["PASS", enc_lines] def decode_lines(self, lines): dec_lines = [] for line in lines: tokens = map(int, line.strip().split()) dec_lines.append(self.decode(tokens)) return ["PASS", dec_lines] if __name__ == "__main__": main()
data2vec_vision-main
deltalm/src/examples/roberta/multiprocessing_bpe_encoder.py
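A minimal single-process sketch of the GPT-2 BPE encoding that the script above parallelizes, assuming fairseq is installed and encoder.json/vocab.bpe have been downloaded; the paths and input text are illustrative:

# Hedged example: encode one line with fairseq's GPT-2 BPE, as MultiprocessingEncoder.encode does.
from fairseq.data.encoders.gpt2_bpe import get_encoder

bpe = get_encoder("encoder.json", "vocab.bpe")
ids = bpe.encode("Hello world")      # list of BPE token ids
print(" ".join(map(str, ids)))       # the space-joined form written to the output file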
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import json import os import re class InputExample: def __init__(self, paragraph, qa_list, label): self.paragraph = paragraph self.qa_list = qa_list self.label = label def get_examples(data_dir, set_type): """ Extract paragraph and question-answer list from each json file """ examples = [] levels = ["middle", "high"] set_type_c = set_type.split("-") if len(set_type_c) == 2: levels = [set_type_c[1]] set_type = set_type_c[0] for level in levels: cur_dir = os.path.join(data_dir, set_type, level) for filename in os.listdir(cur_dir): cur_path = os.path.join(cur_dir, filename) with open(cur_path, "r") as f: cur_data = json.load(f) answers = cur_data["answers"] options = cur_data["options"] questions = cur_data["questions"] context = cur_data["article"].replace("\n", " ") context = re.sub(r"\s+", " ", context) for i in range(len(answers)): label = ord(answers[i]) - ord("A") qa_list = [] question = questions[i] for j in range(4): option = options[i][j] if "_" in question: qa_cat = question.replace("_", option) else: qa_cat = " ".join([question, option]) qa_cat = re.sub(r"\s+", " ", qa_cat) qa_list.append(qa_cat) examples.append(InputExample(context, qa_list, label)) return examples def main(): """ Helper script to extract paragraphs questions and answers from RACE datasets. """ parser = argparse.ArgumentParser() parser.add_argument( "--input-dir", help="input directory for downloaded RACE dataset", ) parser.add_argument( "--output-dir", help="output directory for extracted data", ) args = parser.parse_args() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir, exist_ok=True) for set_type in ["train", "dev", "test-middle", "test-high"]: examples = get_examples(args.input_dir, set_type) qa_file_paths = [ os.path.join(args.output_dir, set_type + ".input" + str(i + 1)) for i in range(4) ] qa_files = [open(qa_file_path, "w") for qa_file_path in qa_file_paths] outf_context_path = os.path.join(args.output_dir, set_type + ".input0") outf_label_path = os.path.join(args.output_dir, set_type + ".label") outf_context = open(outf_context_path, "w") outf_label = open(outf_label_path, "w") for example in examples: outf_context.write(example.paragraph + "\n") for i in range(4): qa_files[i].write(example.qa_list[i] + "\n") outf_label.write(str(example.label) + "\n") for f in qa_files: f.close() outf_label.close() outf_context.close() if __name__ == "__main__": main()
data2vec_vision-main
deltalm/src/examples/roberta/preprocess_RACE.py
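For each RACE question the script emits one context file (<split>.input0), four question+option files (<split>.input1 .. <split>.input4) and a label file (<split>.label with values 0-3). A sketch of how a question/option pair is merged; the texts are illustrative:

# Hedged example: fill the blank if the question contains "_", otherwise concatenate.
import re

def merge(question: str, option: str) -> str:
    qa = question.replace("_", option) if "_" in question else " ".join([question, option])
    return re.sub(r"\s+", " ", qa)

print(merge("The sky is _ .", "blue"))  # "The sky is blue ."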
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json from functools import lru_cache def convert_sentence_to_json(sentence): if "_" in sentence: prefix, rest = sentence.split("_", 1) query, rest = rest.split("_", 1) query_index = len(prefix.rstrip().split(" ")) else: query, query_index = None, None prefix, rest = sentence.split("[", 1) pronoun, rest = rest.split("]", 1) pronoun_index = len(prefix.rstrip().split(" ")) sentence = sentence.replace("_", "").replace("[", "").replace("]", "") return { "idx": 0, "text": sentence, "target": { "span1_index": query_index, "span1_text": query, "span2_index": pronoun_index, "span2_text": pronoun, }, } def extended_noun_chunks(sentence): noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks} np_start, cur_np = 0, "NONE" for i, token in enumerate(sentence): np_type = token.pos_ if token.pos_ in {"NOUN", "PROPN"} else "NONE" if np_type != cur_np: if cur_np != "NONE": noun_chunks.add((np_start, i)) if np_type != "NONE": np_start = i cur_np = np_type if cur_np != "NONE": noun_chunks.add((np_start, len(sentence))) return [sentence[s:e] for (s, e) in sorted(noun_chunks)] def find_token(sentence, start_pos): found_tok = None for tok in sentence: if tok.idx == start_pos: found_tok = tok break return found_tok def find_span(sentence, search_text, start=0): search_text = search_text.lower() for tok in sentence[start:]: remainder = sentence[tok.i :].text.lower() if remainder.startswith(search_text): len_to_consume = len(search_text) start_idx = tok.idx for next_tok in sentence[tok.i :]: end_idx = next_tok.idx + len(next_tok.text) if end_idx - start_idx == len_to_consume: span = sentence[tok.i : next_tok.i + 1] return span return None @lru_cache(maxsize=1) def get_detokenizer(): from sacremoses import MosesDetokenizer detok = MosesDetokenizer(lang="en") return detok @lru_cache(maxsize=1) def get_spacy_nlp(): import en_core_web_lg nlp = en_core_web_lg.load() return nlp def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False): detok = get_detokenizer() nlp = get_spacy_nlp() with open(input_fname) as fin: for line in fin: sample = json.loads(line.strip()) if positive_only and "label" in sample and not sample["label"]: # only consider examples where the query is correct continue target = sample["target"] # clean up the query query = target["span1_text"] if query is not None: if "\n" in query: continue if query.endswith(".") or query.endswith(","): query = query[:-1] # split tokens tokens = sample["text"].split(" ") def strip_pronoun(x): return x.rstrip('.,"') # find the pronoun pronoun_idx = target["span2_index"] pronoun = strip_pronoun(target["span2_text"]) if strip_pronoun(tokens[pronoun_idx]) != pronoun: # hack: sometimes the index is misaligned if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun: pronoun_idx += 1 else: raise Exception("Misaligned pronoun!") assert strip_pronoun(tokens[pronoun_idx]) == pronoun # split tokens before and after the pronoun before = tokens[:pronoun_idx] after = tokens[pronoun_idx + 1 :] # the GPT BPE attaches leading spaces to tokens, so we keep track # of whether we need spaces before or after the pronoun leading_space = " " if pronoun_idx > 0 else "" trailing_space = " " if len(after) > 0 else "" # detokenize before = detok.detokenize(before, return_str=True) pronoun = detok.detokenize([pronoun], return_str=True) after = detok.detokenize(after, return_str=True) # 
hack: when the pronoun ends in a period (or comma), move the # punctuation to the "after" part if pronoun.endswith(".") or pronoun.endswith(","): after = pronoun[-1] + trailing_space + after pronoun = pronoun[:-1] # hack: when the "after" part begins with a comma or period, remove # the trailing space if after.startswith(".") or after.startswith(","): trailing_space = "" # parse sentence with spacy sentence = nlp(before + leading_space + pronoun + trailing_space + after) # find pronoun span start = len(before + leading_space) first_pronoun_tok = find_token(sentence, start_pos=start) pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i) assert pronoun_span.text == pronoun if eval: # convert to format where pronoun is surrounded by "[]" and # query is surrounded by "_" query_span = find_span(sentence, query) query_with_ws = "_{}_{}".format( query_span.text, (" " if query_span.text_with_ws.endswith(" ") else ""), ) pronoun_with_ws = "[{}]{}".format( pronoun_span.text, (" " if pronoun_span.text_with_ws.endswith(" ") else ""), ) if query_span.start < pronoun_span.start: first = (query_span, query_with_ws) second = (pronoun_span, pronoun_with_ws) else: first = (pronoun_span, pronoun_with_ws) second = (query_span, query_with_ws) sentence = ( sentence[: first[0].start].text_with_ws + first[1] + sentence[first[0].end : second[0].start].text_with_ws + second[1] + sentence[second[0].end :].text ) yield sentence, sample.get("label", None) else: yield sentence, pronoun_span, query, sample.get("label", None) def winogrande_jsonl_iterator(input_fname, eval=False): with open(input_fname) as fin: for line in fin: sample = json.loads(line.strip()) sentence, option1, option2 = ( sample["sentence"], sample["option1"], sample["option2"], ) pronoun_span = (sentence.index("_"), sentence.index("_") + 1) if eval: query, cand = option1, option2 else: query = option1 if sample["answer"] == "1" else option2 cand = option2 if sample["answer"] == "1" else option1 yield sentence, pronoun_span, query, cand def filter_noun_chunks( chunks, exclude_pronouns=False, exclude_query=None, exact_match=False ): if exclude_pronouns: chunks = [ np for np in chunks if (np.lemma_ != "-PRON-" and not all(tok.pos_ == "PRON" for tok in np)) ] if exclude_query is not None: excl_txt = [exclude_query.lower()] filtered_chunks = [] for chunk in chunks: lower_chunk = chunk.text.lower() found = False for excl in excl_txt: if ( not exact_match and (lower_chunk in excl or excl in lower_chunk) ) or lower_chunk == excl: found = True break if not found: filtered_chunks.append(chunk) chunks = filtered_chunks return chunks
data2vec_vision-main
deltalm/src/examples/roberta/wsc/wsc_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import LegacyFairseqCriterion, register_criterion from fairseq.data import encoders @register_criterion("wsc") class WSCCriterion(LegacyFairseqCriterion): def __init__(self, args, task): super().__init__(args, task) if self.args.save_predictions is not None: self.prediction_h = open(self.args.save_predictions, "w") else: self.prediction_h = None self.bpe = encoders.build_bpe(args.bpe) self.tokenizer = encoders.build_tokenizer(args.tokenizer) def __del__(self): if self.prediction_h is not None: self.prediction_h.close() @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0) parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0) parser.add_argument( "--wsc-cross-entropy", action="store_true", help="use cross entropy formulation instead of margin loss", ) parser.add_argument( "--save-predictions", metavar="FILE", help="file to save predictions to" ) def get_masked_input(self, tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask] = self.task.mask return masked_tokens def get_lprobs(self, model, tokens, mask): logits, _ = model(src_tokens=self.get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) mask = mask.type_as(scores) scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) return scores def get_loss(self, query_lprobs, cand_lprobs): if self.args.wsc_cross_entropy: return F.cross_entropy( torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0), query_lprobs.new([0]).long(), ) else: return ( -query_lprobs + self.args.wsc_margin_alpha * (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0) ).sum() def forward(self, model, sample, reduce=True): # compute loss and accuracy loss, nloss = 0.0, 0 ncorrect, nqueries = 0, 0 for i, label in enumerate(sample["labels"]): query_lprobs = self.get_lprobs( model, sample["query_tokens"][i].unsqueeze(0), sample["query_masks"][i].unsqueeze(0), ) cand_lprobs = self.get_lprobs( model, sample["candidate_tokens"][i], sample["candidate_masks"][i], ) pred = (query_lprobs >= cand_lprobs).all().item() if label is not None: label = 1 if label else 0 ncorrect += 1 if pred == label else 0 nqueries += 1 if label: # only compute a loss for positive instances nloss += 1 loss += self.get_loss(query_lprobs, cand_lprobs) id = sample["id"][i].item() if self.prediction_h is not None: print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h) if nloss == 0: loss = torch.tensor(0.0, requires_grad=True) sample_size = nqueries if nqueries > 0 else 1 logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "ncorrect": ncorrect, "nqueries": nqueries, } return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = 
sum(log.get("sample_size", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / sample_size / math.log(2), "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) nqueries = sum(log.get("nqueries", 0) for log in logging_outputs) if nqueries > 0: agg_output["accuracy"] = ncorrect / float(nqueries) return agg_output @register_criterion("winogrande") class WinograndeCriterion(WSCCriterion): def forward(self, model, sample, reduce=True): # compute loss and accuracy query_lprobs = self.get_lprobs( model, sample["query_tokens"], sample["query_masks"], ) cand_lprobs = self.get_lprobs( model, sample["candidate_tokens"], sample["candidate_masks"], ) pred = query_lprobs >= cand_lprobs loss = self.get_loss(query_lprobs, cand_lprobs) sample_size = sample["query_tokens"].size(0) ncorrect = pred.sum().item() logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "ncorrect": ncorrect, "nqueries": sample_size, } return loss, sample_size, logging_output
data2vec_vision-main
deltalm/src/examples/roberta/wsc/wsc_criterion.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import wsc_criterion  # noqa
from . import wsc_task  # noqa
data2vec_vision-main
deltalm/src/examples/roberta/wsc/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import tempfile import numpy as np import torch import torch.nn.functional as F from fairseq import utils from fairseq.data import ( Dictionary, IdDataset, ListDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, PadDataset, SortDataset, data_utils, encoders, ) from fairseq.tasks import LegacyFairseqTask, register_task from . import wsc_utils @register_task("wsc") class WSCTask(LegacyFairseqTask): """Task to finetune RoBERTa for Winograd Schemas.""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "data", metavar="DIR", help="path to data directory; we load <split>.jsonl" ) parser.add_argument( "--init-token", type=int, default=None, help="add token at the beginning of each batch item", ) def __init__(self, args, vocab): super().__init__(args) self.vocab = vocab self.mask = vocab.add_symbol("<mask>") self.bpe = encoders.build_bpe(args) self.tokenizer = encoders.build_tokenizer(args) # hack to handle GPT-2 BPE, which includes leading spaces if args.bpe == "gpt2": self.leading_space = True self.trailing_space = False else: self.leading_space = False self.trailing_space = True @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol("<mask>") return dictionary @classmethod def setup_task(cls, args, **kwargs): assert args.criterion == "wsc", "Must set --criterion=wsc" # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) print("| dictionary: {} types".format(len(vocab))) return cls(args, vocab) def binarize(self, s: str, append_eos: bool = False): if self.tokenizer is not None: s = self.tokenizer.encode(s) if self.bpe is not None: s = self.bpe.encode(s) tokens = self.vocab.encode_line( s, append_eos=append_eos, add_if_not_exist=False, ).long() if self.args.init_token is not None: tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) return tokens def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space): toks = self.binarize( prefix + leading_space + txt + trailing_space + suffix, append_eos=True, ) mask = torch.zeros_like(toks, dtype=torch.bool) mask_start = len(self.binarize(prefix)) mask_size = len(self.binarize(leading_space + txt)) mask[mask_start : mask_start + mask_size] = 1 return toks, mask def load_dataset( self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs ): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ if data_path is None: data_path = os.path.join(self.args.data, split + ".jsonl") if not os.path.exists(data_path): raise FileNotFoundError("Cannot find data: {}".format(data_path)) query_tokens = [] query_masks = [] query_lengths = [] candidate_tokens = [] candidate_masks = [] candidate_lengths = [] labels = [] for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path): prefix = sentence[: pronoun_span.start].text suffix = sentence[pronoun_span.end :].text_with_ws # spaCy spans include trailing spaces, but we need to know about # leading spaces for the GPT-2 BPE leading_space = ( " " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else "" ) trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else "" # get noun phrases, excluding pronouns and anything overlapping with the query cand_spans = wsc_utils.filter_noun_chunks( wsc_utils.extended_noun_chunks(sentence), exclude_pronouns=True, exclude_query=query, exact_match=False, ) if query is not None: query_toks, query_mask = self.binarize_with_mask( query, prefix, suffix, leading_space, trailing_space ) query_len = len(query_toks) else: query_toks, query_mask, query_len = None, None, 0 query_tokens.append(query_toks) query_masks.append(query_mask) query_lengths.append(query_len) cand_toks, cand_masks = [], [] for cand_span in cand_spans: toks, mask = self.binarize_with_mask( cand_span.text, prefix, suffix, leading_space, trailing_space, ) cand_toks.append(toks) cand_masks.append(mask) # collate candidates cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad()) cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0) assert cand_toks.size() == cand_masks.size() candidate_tokens.append(cand_toks) candidate_masks.append(cand_masks) candidate_lengths.append(cand_toks.size(1)) labels.append(label) query_lengths = np.array(query_lengths) query_tokens = ListDataset(query_tokens, query_lengths) query_masks = ListDataset(query_masks, query_lengths) candidate_lengths = np.array(candidate_lengths) candidate_tokens = ListDataset(candidate_tokens, candidate_lengths) candidate_masks = ListDataset(candidate_masks, candidate_lengths) labels = ListDataset(labels, [1] * len(labels)) dataset = { "id": IdDataset(), "query_tokens": query_tokens, "query_masks": query_masks, "candidate_tokens": candidate_tokens, "candidate_masks": candidate_masks, "labels": labels, "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(query_tokens, reduce=True), } nested_dataset = NestedDictionaryDataset( dataset, sizes=[query_lengths], ) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(query_tokens)) dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) if return_only: return dataset self.datasets[split] = dataset return self.datasets[split] def build_dataset_for_inference(self, sample_json): with tempfile.NamedTemporaryFile(buffering=0) as h: h.write((json.dumps(sample_json) + "\n").encode("utf-8")) dataset = self.load_dataset( "disambiguate_pronoun", data_path=h.name, return_only=True, ) return dataset def disambiguate_pronoun(self, model, sentence, use_cuda=False): sample_json = wsc_utils.convert_sentence_to_json(sentence) dataset = self.build_dataset_for_inference(sample_json) sample = dataset.collater([dataset[0]]) if use_cuda: sample = utils.move_to_cuda(sample) def get_masked_input(tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask.bool()] = self.mask return masked_tokens 
def get_lprobs(tokens, mask): logits, _ = model(src_tokens=get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) mask = mask.type_as(scores) scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) return scores cand_lprobs = get_lprobs( sample["candidate_tokens"][0], sample["candidate_masks"][0], ) if sample["query_tokens"][0] is not None: query_lprobs = get_lprobs( sample["query_tokens"][0].unsqueeze(0), sample["query_masks"][0].unsqueeze(0), ) return (query_lprobs >= cand_lprobs).all().item() == 1 else: best_idx = cand_lprobs.argmax().item() full_cand = sample["candidate_tokens"][0][best_idx] mask = sample["candidate_masks"][0][best_idx] toks = full_cand[mask.bool()] return self.bpe.decode(self.source_dictionary.string(toks)).strip() @property def source_dictionary(self): return self.vocab @property def target_dictionary(self): return self.vocab @register_task("winogrande") class WinograndeTask(WSCTask): """ Task for WinoGrande dataset. Efficient implementation for Winograd schema tasks with exactly two candidates, one of which is correct. """ @classmethod def setup_task(cls, args, **kwargs): assert args.criterion == "winogrande", "Must set --criterion=winogrande" # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) print("| dictionary: {} types".format(len(vocab))) return cls(args, vocab) def load_dataset( self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs ): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if data_path is None: data_path = os.path.join(self.args.data, split + ".jsonl") if not os.path.exists(data_path): raise FileNotFoundError("Cannot find data: {}".format(data_path)) query_tokens = [] query_masks = [] query_lengths = [] candidate_tokens = [] candidate_masks = [] candidate_lengths = [] itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test")) for sample in itr: sentence, pronoun_span, query, cand_text = sample prefix = sentence[: pronoun_span[0]].rstrip() suffix = sentence[pronoun_span[1] :] leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else "" trailing_space = "" if query is not None: query_toks, query_mask = self.binarize_with_mask( query, prefix, suffix, leading_space, trailing_space, ) query_len = len(query_toks) else: query_toks, query_mask, query_len = None, None, 0 query_tokens.append(query_toks) query_masks.append(query_mask) query_lengths.append(query_len) cand_toks, cand_mask = self.binarize_with_mask( cand_text, prefix, suffix, leading_space, trailing_space, ) candidate_tokens.append(cand_toks) candidate_masks.append(cand_mask) candidate_lengths.append(cand_toks.size(0)) query_lengths = np.array(query_lengths) def get_pad_dataset_fn(tokens, length, pad_idx): return PadDataset( ListDataset(tokens, length), pad_idx=pad_idx, left_pad=False, ) query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad()) query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0) candidate_lengths = np.array(candidate_lengths) candidate_tokens = get_pad_dataset_fn( candidate_tokens, candidate_lengths, self.vocab.pad() ) candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0) dataset = { "id": IdDataset(), "query_tokens": query_tokens, "query_masks": query_masks, "candidate_tokens": candidate_tokens, "candidate_masks": candidate_masks, "nsentences": NumSamplesDataset(), "ntokens": 
NumelDataset(query_tokens, reduce=True), } nested_dataset = NestedDictionaryDataset( dataset, sizes=[query_lengths], ) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(query_tokens)) dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) if return_only: return dataset self.datasets[split] = dataset return self.datasets[split]
data2vec_vision-main
deltalm/src/examples/roberta/wsc/wsc_task.py
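A hedged usage sketch for the WSC task defined above, following the upstream fairseq recipe; the checkpoint and data paths are placeholders, and the module path that registers the task may differ in this tree.

from fairseq.models.roberta import RobertaModel
from examples.roberta.wsc import wsc_utils  # noqa: importing registers the wsc task/criterion

# placeholder paths: a RoBERTa model fine-tuned with --criterion=wsc
roberta = RobertaModel.from_pretrained(
    "checkpoints", checkpoint_file="checkpoint_best.pt", data_name_or_path="WSC/"
)
roberta.eval()  # disable dropout

# the query span is marked with _underscores_ and the pronoun with [brackets]
sentence = "The _trophy_ would not fit in the brown suitcase because [it] was too big."
print(roberta.disambiguate_pronoun(sentence))  # should print True for this example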
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import commonsense_qa_task # noqa
data2vec_vision-main
deltalm/src/examples/roberta/commonsense_qa/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import numpy as np import torch from fairseq.data import ( Dictionary, IdDataset, ListDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, RawLabelDataset, RightPadDataset, SortDataset, data_utils, encoders, ) from fairseq.tasks import LegacyFairseqTask, register_task @register_task("commonsense_qa") class CommonsenseQATask(LegacyFairseqTask): """Task to finetune RoBERTa for Commonsense QA.""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "data", metavar="DIR", help="path to data directory; we load <split>.jsonl" ) parser.add_argument( "--init-token", type=int, default=None, help="add token at the beginning of each batch item", ) parser.add_argument("--num-classes", type=int, default=5) def __init__(self, args, vocab): super().__init__(args) self.vocab = vocab self.mask = vocab.add_symbol("<mask>") self.bpe = encoders.build_bpe(args) @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol("<mask>") return dictionary @classmethod def setup_task(cls, args, **kwargs): assert ( args.criterion == "sentence_ranking" ), "Must set --criterion=sentence_ranking" # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) print("| dictionary: {} types".format(len(vocab))) return cls(args, vocab) def load_dataset( self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs ): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ def binarize(s, append_bos=False): if self.bpe is not None: s = self.bpe.encode(s) tokens = self.vocab.encode_line( s, append_eos=True, add_if_not_exist=False, ).long() if append_bos and self.args.init_token is not None: tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) return tokens if data_path is None: data_path = os.path.join(self.args.data, split + ".jsonl") if not os.path.exists(data_path): raise FileNotFoundError("Cannot find data: {}".format(data_path)) src_tokens = [[] for i in range(self.args.num_classes)] src_lengths = [[] for i in range(self.args.num_classes)] labels = [] with open(data_path) as h: for line in h: example = json.loads(line.strip()) if "answerKey" in example: label = ord(example["answerKey"]) - ord("A") labels.append(label) question = example["question"]["stem"] assert len(example["question"]["choices"]) == self.args.num_classes # format: `<s> Q: Where would I not want a fox? 
</s> A: hen house </s>` question = "Q: " + question question_toks = binarize(question, append_bos=True) for i, choice in enumerate(example["question"]["choices"]): src = "A: " + choice["text"] src_bin = torch.cat([question_toks, binarize(src)]) src_tokens[i].append(src_bin) src_lengths[i].append(len(src_bin)) assert all( len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes) ) assert len(src_tokens[0]) == len(src_lengths[0]) assert len(labels) == 0 or len(labels) == len(src_tokens[0]) for i in range(self.args.num_classes): src_lengths[i] = np.array(src_lengths[i]) src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i]) src_lengths[i] = ListDataset(src_lengths[i]) dataset = { "id": IdDataset(), "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_tokens[0], reduce=True), } for i in range(self.args.num_classes): dataset.update( { "net_input{}".format(i + 1): { "src_tokens": RightPadDataset( src_tokens[i], pad_idx=self.source_dictionary.pad(), ), "src_lengths": src_lengths[i], } } ) if len(labels) > 0: dataset.update({"target": RawLabelDataset(labels)}) dataset = NestedDictionaryDataset( dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])], ) with data_utils.numpy_seed(self.args.seed): dataset = SortDataset( dataset, # shuffle sort_order=[np.random.permutation(len(dataset))], ) print("| Loaded {} with {} samples".format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args): from fairseq import models model = models.build_model(args, self) model.register_classification_head( "sentence_classification_head", num_classes=1, ) return model @property def source_dictionary(self): return self.vocab @property def target_dictionary(self): return self.vocab
data2vec_vision-main
deltalm/src/examples/roberta/commonsense_qa/commonsense_qa_task.py
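A small string-level sketch of the input layout the Commonsense QA task above builds for each answer choice (<s> Q: ... </s> A: ... </s>); the real task applies GPT-2 BPE and binarization, and the example record below is made up.

example = {
    "answerKey": "A",
    "question": {
        "stem": "Where would I not want a fox?",
        "choices": [
            {"label": "A", "text": "hen house"},
            {"label": "B", "text": "england"},
        ],
    },
}

question = "Q: " + example["question"]["stem"]
label = ord(example["answerKey"]) - ord("A")  # 0

# one candidate sequence per choice; the task scores each with the ranking head
candidates = [
    "<s> {} </s> A: {} </s>".format(question, choice["text"])
    for choice in example["question"]["choices"]
]
assert candidates[label].endswith("A: hen house </s>")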
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import fileinput import sacremoses def main(): parser = argparse.ArgumentParser(description="") parser.add_argument("files", nargs="*", help="input files") args = parser.parse_args() detok = sacremoses.MosesDetokenizer() for line in fileinput.input(args.files, openhook=fileinput.hook_compressed): print( detok.detokenize(line.strip().split(" ")) .replace(" @", "") .replace("@ ", "") .replace(" =", "=") .replace("= ", "=") .replace(" – ", "–") ) if __name__ == "__main__": main()
data2vec_vision-main
deltalm/src/examples/megatron_11b/detok.py
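A rough sketch of what the detokenizer script above undoes: Megatron-11B/WikiText-style text wraps in-number punctuation in "@" markers, which the chained replace calls strip after Moses detokenization. Exact spacing depends on sacremoses, so the expected output is approximate.

import sacremoses

detok = sacremoses.MosesDetokenizer()
line = "As of 2010 , the town had a population of 3 @,@ 456 ."
out = (
    detok.detokenize(line.strip().split(" "))
    .replace(" @", "")
    .replace("@ ", "")
)
# roughly: "As of 2010, the town had a population of 3,456."
print(out)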
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.sequence_generator import EnsembleModel def get_avg_pool( models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False ): model = EnsembleModel(models) # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32) encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype( np.float32 ) encoder_mask = np.expand_dims(encoder_mask.T, axis=2) if has_langtok: encoder_mask = encoder_mask[1:, :, :] np_encoder_outs = np_encoder_outs[1, :, :] masked_encoder_outs = encoder_mask * np_encoder_outs avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0) return avg_pool def main(args): assert args.path is not None, "--path required for generation!" assert ( not args.sampling or args.nbest == args.beam ), "--sampling requires --nbest to be equal to --beam" assert ( args.replace_unk is None or args.raw_text ), "--replace-unk requires a raw text dataset (--raw-text)" args.beam = 1 utils.import_user_module(args) if args.max_tokens is None: args.max_tokens = 12000 print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary # Load ensemble print("| loading model(s) from {}".format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( args.path.split(":"), arg_overrides=eval(args.model_overrides), task=task, ) # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_positions=utils.resolve_max_positions( task.max_positions(), ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, ).next_epoch_itr(shuffle=False) num_sentences = 0 source_sentences = [] shard_id = 0 all_avg_pool = None encoder_has_langtok = ( hasattr(task.args, "encoder_langtok") and task.args.encoder_langtok is not None and hasattr(task.args, "lang_tok_replacing_bos_eos") and not task.args.lang_tok_replacing_bos_eos ) with progress_bar.build_progress_bar(args, itr) as t: for sample in t: if sample is None: print("Skipping None") continue sample = utils.move_to_cuda(sample) if use_cuda 
else sample if "net_input" not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample["target"][:, : args.prefix_size] with torch.no_grad(): avg_pool = get_avg_pool( models, sample, prefix_tokens, src_dict, args.post_process, has_langtok=encoder_has_langtok, ) if all_avg_pool is not None: all_avg_pool = np.concatenate((all_avg_pool, avg_pool)) else: all_avg_pool = avg_pool if not isinstance(sample["id"], list): sample_ids = sample["id"].tolist() else: sample_ids = sample["id"] for i, sample_id in enumerate(sample_ids): # Remove padding src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) # Either retrieve the original sentences or regenerate them from tokens. if align_dict is not None: src_str = task.dataset(args.gen_subset).src.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, args.post_process) else: src_str = "" if not args.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str)) source_sentences.append(f"{sample_id}\t{src_str}") num_sentences += sample["nsentences"] if all_avg_pool.shape[0] >= 1000000: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w", ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w", ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) all_avg_pool = None source_sentences = [] shard_id += 1 if all_avg_pool is not None: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w" ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w" ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) return None def cli_main(): parser = options.get_generation_parser() parser.add_argument( "--encoder-save-dir", default="", type=str, metavar="N", help="directory to save encoder outputs", ) args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
data2vec_vision-main
deltalm/src/examples/criss/save_encoder.py
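The script above saves each shard of mean-pooled encoder states with numpy's tofile, i.e. raw float32 with no header, alongside a tab-separated id/sentence file. A sketch of reading a shard back; the dimension (1024 matches the mining script's default) and paths are assumptions.

import numpy as np

dim = 1024                                   # encoder output dimension used when saving
emb_path = "encoder_out/all_avg_pool.en.0"   # hypothetical shard paths
txt_path = "encoder_out/sentences.en.0"

embeddings = np.fromfile(emb_path, dtype=np.float32)
num_rows = embeddings.shape[0] // dim
embeddings = embeddings.reshape((num_rows, dim))

with open(txt_path) as f:
    sentences = [line.rstrip("\n") for line in f]  # each line is "id\ttext"

assert len(sentences) == num_rows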
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import glob from subprocess import check_call try: import faiss has_faiss = True except ImportError: has_faiss = False import numpy as np GB = 1024 * 1024 * 1024 def call(cmd): print(cmd) check_call(cmd, shell=True) def get_batches(directory, lang, prefix="all_avg_pool"): print(f"Finding in {directory}/{prefix}.{lang}*") files = glob.glob(f"{directory}/{prefix}.{lang}*") emb_files = [] txt_files = [] for emb_fi in files: emb_files.append(emb_fi) txt_fi = emb_fi.replace(prefix, "sentences") txt_files.append(txt_fi) return emb_files, txt_files def load_batch(emb_file, dim): embeddings = np.fromfile(emb_file, dtype=np.float32) num_rows = int(embeddings.shape[0] / dim) embeddings = embeddings.reshape((num_rows, dim)) faiss.normalize_L2(embeddings) return embeddings def knnGPU_sharded(x_batches_f, y_batches_f, dim, k, direction="x2y"): if not has_faiss: raise ImportError("Please install Faiss") sims = [] inds = [] xfrom = 0 xto = 0 for x_batch_f in x_batches_f: yfrom = 0 yto = 0 x_batch = load_batch(x_batch_f, dim) xto = xfrom + x_batch.shape[0] bsims, binds = [], [] for y_batch_f in y_batches_f: y_batch = load_batch(y_batch_f, dim) neighbor_size = min(k, y_batch.shape[0]) yto = yfrom + y_batch.shape[0] print("{}-{} -> {}-{}".format(xfrom, xto, yfrom, yto)) idx = faiss.IndexFlatIP(dim) idx = faiss.index_cpu_to_all_gpus(idx) idx.add(y_batch) bsim, bind = idx.search(x_batch, neighbor_size) bsims.append(bsim) binds.append(bind + yfrom) yfrom += y_batch.shape[0] del idx del y_batch bsims = np.concatenate(bsims, axis=1) binds = np.concatenate(binds, axis=1) aux = np.argsort(-bsims, axis=1) sim_batch = np.zeros((x_batch.shape[0], k), dtype=np.float32) ind_batch = np.zeros((x_batch.shape[0], k), dtype=np.int64) for i in range(x_batch.shape[0]): for j in range(k): sim_batch[i, j] = bsims[i, aux[i, j]] ind_batch[i, j] = binds[i, aux[i, j]] sims.append(sim_batch) inds.append(ind_batch) xfrom += x_batch.shape[0] del x_batch sim = np.concatenate(sims, axis=0) ind = np.concatenate(inds, axis=0) return sim, ind def score(sim, fwd_mean, bwd_mean, margin): return margin(sim, (fwd_mean + bwd_mean) / 2) def score_candidates( sim_mat, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False ): print(" - scoring {:d} candidates".format(sim_mat.shape[0])) scores = np.zeros(candidate_inds.shape) for i in range(scores.shape[0]): for j in range(scores.shape[1]): k = int(candidate_inds[i, j]) scores[i, j] = score(sim_mat[i, j], fwd_mean[i], bwd_mean[k], margin) return scores def load_text(files): all_sentences = [] for fi in files: with open(fi) as sentence_fi: for line in sentence_fi: all_sentences.append(line.strip()) print(f"Read {len(all_sentences)} sentences") return all_sentences if __name__ == "__main__": parser = argparse.ArgumentParser(description="Mine bitext") parser.add_argument("--src-lang", help="Source language") parser.add_argument("--tgt-lang", help="Target language") parser.add_argument( "--dict-path", help="Path to dictionary file", default="dict.txt" ) parser.add_argument( "--spm-path", help="Path to SPM model file", default="sentence.bpe.model" ) parser.add_argument("--dim", type=int, default=1024, help="Embedding dimension") parser.add_argument("--mem", type=int, default=5, help="Memory in GB") parser.add_argument("--src-dir", help="Source directory") 
parser.add_argument("--tgt-dir", help="Target directory") parser.add_argument("--output", help="Output path") parser.add_argument( "--neighborhood", type=int, default=4, help="Embedding dimension" ) parser.add_argument( "--threshold", type=float, default=1.06, help="Threshold on mined bitext" ) parser.add_argument( "--valid-size", type=int, default=2000, help="Number of sentences used for validation set", ) parser.add_argument( "--min-count", type=int, default=50000, help="Min num sentences used for each language", ) args = parser.parse_args() x_batches_f, x_sents_f = get_batches(args.src_dir, args.src_lang) y_batches_f, y_sents_f = get_batches(args.tgt_dir, args.tgt_lang) margin = lambda a, b: a / b y2x_sim, y2x_ind = knnGPU_sharded( y_batches_f, x_batches_f, args.dim, args.neighborhood, direction="y2x" ) x2y_sim, x2y_ind = knnGPU_sharded( x_batches_f, y_batches_f, args.dim, args.neighborhood, direction="x2y" ) x2y_mean = x2y_sim.mean(axis=1) y2x_mean = y2x_sim.mean(axis=1) fwd_scores = score_candidates(x2y_sim, x2y_ind, x2y_mean, y2x_mean, margin) bwd_scores = score_candidates(y2x_sim, y2x_ind, y2x_mean, x2y_mean, margin) fwd_best = x2y_ind[np.arange(x2y_sim.shape[0]), fwd_scores.argmax(axis=1)] bwd_best = y2x_ind[np.arange(y2x_sim.shape[0]), bwd_scores.argmax(axis=1)] indices = np.stack( ( np.concatenate((np.arange(x2y_ind.shape[0]), bwd_best)), np.concatenate((fwd_best, np.arange(y2x_ind.shape[0]))), ), axis=1, ) scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1))) x_sentences = load_text(x_sents_f) y_sentences = load_text(y_sents_f) threshold = args.threshold min_count = args.min_count seen_src, seen_trg = set(), set() directory = args.output call(f"mkdir -p {directory}") src_out = open( f"{directory}/all.{args.src_lang}", mode="w", encoding="utf-8", errors="surrogateescape", ) tgt_out = open( f"{directory}/all.{args.tgt_lang}", mode="w", encoding="utf-8", errors="surrogateescape", ) scores_out = open( f"{directory}/all.scores", mode="w", encoding="utf-8", errors="surrogateescape" ) count = 0 for i in np.argsort(-scores): src_ind, trg_ind = indices[i] if src_ind not in seen_src and trg_ind not in seen_trg: seen_src.add(src_ind) seen_trg.add(trg_ind) if scores[i] > threshold or count < min_count: if x_sentences[src_ind]: print(scores[i], file=scores_out) print(x_sentences[src_ind], file=src_out) print(y_sentences[trg_ind], file=tgt_out) count += 1 else: print(f"Ignoring sentence: {x_sentences[src_ind]}") src_out.close() tgt_out.close() scores_out.close() print(f"Found {count} pairs for threshold={threshold}") with open(f"{directory}/all.{args.src_lang}") as all_s, open( f"{directory}/all.{args.tgt_lang}" ) as all_t, open(f"{directory}/valid.{args.src_lang}", "w") as valid_s, open( f"{directory}/valid.{args.tgt_lang}", "w" ) as valid_t, open( f"{directory}/train.{args.src_lang}", "w" ) as train_s, open( f"{directory}/train.{args.tgt_lang}", "w" ) as train_t: count = 0 for s_line, t_line in zip(all_s, all_t): s_line = s_line.split("\t")[1] t_line = t_line.split("\t")[1] if count >= args.valid_size: train_s.write(s_line) train_t.write(t_line) else: valid_s.write(s_line) valid_t.write(t_line) count += 1
data2vec_vision-main
deltalm/src/examples/criss/mining/mine.py
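A small numeric sketch of the ratio-margin scoring used by the mining script above: the cosine similarity of a candidate pair is divided by the average of its forward and backward neighbourhood means (margin = lambda a, b: a / b), and pairs are kept when the score clears --threshold. The numbers are made up.

def ratio_margin(sim, fwd_mean, bwd_mean):
    # same arithmetic as score(sim, fwd_mean, bwd_mean, margin) above
    return sim / ((fwd_mean + bwd_mean) / 2)

sim = 0.62        # cosine similarity of the candidate pair
fwd_mean = 0.55   # mean similarity of x to its k nearest y neighbours
bwd_mean = 0.51   # mean similarity of y to its k nearest x neighbours

print(ratio_margin(sim, fwd_mean, bwd_mean))  # ~1.17, above the default threshold of 1.06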
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import glob import numpy as np DIM = 1024 def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False): target_ids = [tid for tid in target_embs] source_mat = np.stack(source_embs.values(), axis=0) normalized_source_mat = source_mat / np.linalg.norm( source_mat, axis=1, keepdims=True ) target_mat = np.stack(target_embs.values(), axis=0) normalized_target_mat = target_mat / np.linalg.norm( target_mat, axis=1, keepdims=True ) sim_mat = normalized_source_mat.dot(normalized_target_mat.T) if return_sim_mat: return sim_mat neighbors_map = {} for i, sentence_id in enumerate(source_embs): idx = np.argsort(sim_mat[i, :])[::-1][:k] neighbors_map[sentence_id] = [target_ids[tid] for tid in idx] return neighbors_map def load_embeddings(directory, LANGS): sentence_embeddings = {} sentence_texts = {} for lang in LANGS: sentence_embeddings[lang] = {} sentence_texts[lang] = {} lang_dir = f"{directory}/{lang}" embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*") for embed_file in embedding_files: shard_id = embed_file.split(".")[-1] embeddings = np.fromfile(embed_file, dtype=np.float32) num_rows = embeddings.shape[0] // DIM embeddings = embeddings.reshape((num_rows, DIM)) with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file: for idx, line in enumerate(sentence_file): sentence_id, sentence = line.strip().split("\t") sentence_texts[lang][sentence_id] = sentence sentence_embeddings[lang][sentence_id] = embeddings[idx, :] return sentence_embeddings, sentence_texts def compute_accuracy(directory, LANGS): sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS) top_1_accuracy = {} top1_str = " ".join(LANGS) + "\n" for source_lang in LANGS: top_1_accuracy[source_lang] = {} top1_str += f"{source_lang} " for target_lang in LANGS: top1 = 0 top5 = 0 neighbors_map = compute_dist( sentence_embeddings[source_lang], sentence_embeddings[target_lang] ) for sentence_id, neighbors in neighbors_map.items(): if sentence_id == neighbors[0]: top1 += 1 if sentence_id in neighbors[:5]: top5 += 1 n = len(sentence_embeddings[target_lang]) top1_str += f"{top1/n} " top1_str += "\n" print(top1_str) print(top1_str, file=open(f"{directory}/accuracy", "w")) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Analyze encoder outputs") parser.add_argument("directory", help="Source language corpus") parser.add_argument("--langs", help="List of langs") args = parser.parse_args() langs = args.langs.split(",") compute_accuracy(args.directory, langs)
data2vec_vision-main
deltalm/src/examples/criss/sentence_retrieval/encoder_analysis.py
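A tiny numpy sketch of the retrieval step that compute_dist above performs: embeddings are L2-normalized, pairwise cosine similarities are taken, and top-1 accuracy checks whether each sentence retrieves its own counterpart in the other language. Values are made up.

import numpy as np

src = np.array([[1.0, 0.0], [0.0, 1.0]])   # two source-language embeddings
tgt = np.array([[0.9, 0.1], [0.2, 0.8]])   # their target-language counterparts, same order

src = src / np.linalg.norm(src, axis=1, keepdims=True)
tgt = tgt / np.linalg.norm(tgt, axis=1, keepdims=True)

sim = src.dot(tgt.T)                              # pairwise cosine similarities
top1 = sim.argmax(axis=1)                         # nearest target for each source sentence
accuracy = (top1 == np.arange(len(src))).mean()   # 1.0 for this toy example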
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.search import Search class NoisyChannelBeamSearch(Search): def __init__(self, tgt_dict): super().__init__(tgt_dict) self.fw_scores_buf = None self.lm_scores_buf = None def _init_buffers(self, t): # super()._init_buffers(t) if self.fw_scores_buf is None: self.scores_buf = t.new() self.indices_buf = torch.LongTensor().to(device=t.device) self.beams_buf = torch.LongTensor().to(device=t.device) self.fw_scores_buf = t.new() self.lm_scores_buf = t.new() def combine_fw_bw(self, combine_method, fw_cum, bw, step): if combine_method == "noisy_channel": fw_norm = fw_cum.div(step + 1) lprobs = bw + fw_norm elif combine_method == "lm_only": lprobs = bw + fw_cum return lprobs def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method): self._init_buffers(fw_lprobs) bsz, beam_size, vocab_size = fw_lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous() bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous() # nothing to add since we are at the first step fw_lprobs_cum = fw_lprobs else: # make probs contain cumulative scores for each hypothesis raw_scores = (scores[:, :, step - 1].unsqueeze(-1)) fw_lprobs_cum = (fw_lprobs.add(raw_scores)) combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step) # choose the top k according to the combined noisy channel model score torch.topk( combined_lprobs.view(bsz, -1), k=min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size * 2, combined_lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), out=(self.scores_buf, self.indices_buf), ) # save corresponding fw and lm scores self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf) self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf) # Project back into relative indices and beams self.beams_buf = self.indices_buf // vocab_size self.indices_buf.fmod_(vocab_size) return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
data2vec_vision-main
deltalm/src/examples/fast_noisy_channel/noisy_channel_beam_search.py
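A minimal sketch of combine_fw_bw above with combine_method='noisy_channel': the cumulative direct-model score is length-normalized by the current step before being added to the combined channel/LM score. The tensors are illustrative.

import torch

step = 3                                  # zero-based decoding step
fw_cum = torch.tensor([[-4.0, -6.0]])     # cumulative direct-model log-probs per candidate
bw = torch.tensor([[-2.5, -1.0]])         # channel + language model log-probs

fw_norm = fw_cum / (step + 1)             # same normalization as fw_cum.div(step + 1)
combined = bw + fw_norm
print(combined)                           # tensor([[-3.5000, -2.5000]])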
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import noisy_channel_translation # noqa from . import noisy_channel_sequence_generator # noqa from . import noisy_channel_beam_search # noqa
data2vec_vision-main
deltalm/src/examples/fast_noisy_channel/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import math import numpy as np import torch import torch.nn.functional as F from torch import Tensor from .noisy_channel_beam_search import NoisyChannelBeamSearch from fairseq.sequence_generator import EnsembleModel class NoisyChannelSequenceGenerator(object): def __init__( self, combine_method, tgt_dict, src_dict=None, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, len_penalty=1.0, unk_penalty=0.0, retain_dropout=False, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, normalize_scores=True, channel_models=None, k2=10, ch_weight=1.0, channel_scoring_type='log_norm', top_k_vocab=0, lm_models=None, lm_dict=None, lm_weight=1.0, normalize_lm_scores_by_tgt_len=False, ): """Generates translations of a given source sentence, using beam search with noisy channel decoding. Args: combine_method (string, optional): Method to combine direct, LM and channel model scores (default: None) tgt_dict (~fairseq.data.Dictionary): target dictionary src_dict (~fairseq.data.Dictionary): source dictionary beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) retain_dropout (bool, optional): use dropout when generating (default: False) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) no_repeat_ngram_size (int, optional): Size of n-grams that we avoid repeating in the generation (default: 0) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) channel_models (List[~fairseq.models.FairseqModel]): ensemble of models translating from the target to the source k2 (int, optional): Top K2 candidates to score per beam at each step (default:10) ch_weight (int, optional): Weight associated with the channel model score assuming that the direct model score has weight 1.0 (default: 1.0) channel_scoring_type (str, optional): String specifying how to score the channel model (default: 'log_norm') top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or `'src_vocab_batched'`, then this parameter specifies the number of most frequent tokens to include in the channel model output vocabulary, in addition to the source tokens in the input batch (default: 0) lm_models (List[~fairseq.models.FairseqModel]): ensemble of models generating text in the target language lm_dict (~fairseq.data.Dictionary): LM Model dictionary lm_weight (int, optional): Weight associated with the LM model score assuming that the direct model score has weight 1.0 (default: 1.0) normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores by the target length? 
By default, we normalize the combination of LM and channel model scores by the source length """ self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.retain_dropout = retain_dropout self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size self.channel_models = channel_models self.src_dict = src_dict self.tgt_dict = tgt_dict self.combine_method = combine_method self.k2 = k2 self.ch_weight = ch_weight self.channel_scoring_type = channel_scoring_type self.top_k_vocab = top_k_vocab self.lm_models = lm_models self.lm_dict = lm_dict self.lm_weight = lm_weight self.log_softmax_fn = torch.nn.LogSoftmax(dim=1) self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len self.share_tgt_dict = (self.lm_dict == self.tgt_dict) self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict) self.ch_scoring_bsz = 3072 assert temperature > 0, '--temperature must be greater than 0' self.search = NoisyChannelBeamSearch(tgt_dict) @torch.no_grad() def generate( self, models, sample, prefix_tokens=None, bos_token=None, **kwargs ): """Generate a batch of translations. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens """ model = EnsembleModel(models) incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(model.models_size) ], ) if not self.retain_dropout: model.eval() # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample['net_input'].items() if k != 'prev_output_tokens' } src_tokens = encoder_input['src_tokens'] src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) input_size = src_tokens.size() # batch dimension goes first followed by source lengths bsz = input_size[0] src_len = input_size[1] beam_size = self.beam_size if self.match_source_len: max_len = src_lengths_no_eos.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), # exclude the EOS marker model.max_decoder_positions() - 1, ) # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = model.reorder_encoder_out(encoder_outs, new_order) src_lengths = encoder_input['src_lengths'] # initialize buffers scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0) lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0) scores_buf = scores.clone() tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad) tokens_buf = tokens.clone() tokens[:, 0] = self.eos if bos_token is None else bos_token # reorder source tokens so they may be used as a reference in generating P(S|T) src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index) src_tokens = src_tokens.repeat(1, 
beam_size).view(-1, src_len) src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1) attn, attn_buf = None, None nonpad_idxs = None # The cands_to_ignore indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then the cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask # list of completed sentences finalized = [[] for i in range(bsz)] finished = [False for i in range(bsz)] num_remaining_sent = bsz # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) cand_offsets = torch.arange(0, cand_size).type_as(tokens) # helper function for allocating buffers on the fly buffers = {} def buffer(name, type_of=tokens): # noqa if name not in buffers: buffers[name] = type_of.new() return buffers[name] def is_finished(sent, step, unfin_idx): """ Check whether we've finished generation for a given sentence, by comparing the worst score among finalized hypotheses to the best possible score among unfinalized hypotheses. """ assert len(finalized[sent]) <= beam_size if len(finalized[sent]) == beam_size: return True return False def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores): """ Finalize the given hypotheses at this step, while keeping the total number of finalized hypotheses per sentence <= beam_size. Note: the input must be in the desired finalization order, so that hypotheses that appear earlier in the input are preferred to those that appear later. 
Args: step: current time step bbsz_idx: A vector of indices in the range [0, bsz*beam_size), indicating which hypotheses to finalize eos_scores: A vector of the same size as bbsz_idx containing fw scores for each hypothesis combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing combined noisy channel scores for each hypothesis """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors tokens_clone = tokens.index_select(0, bbsz_idx) tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS assert not tokens_clone.eq(self.eos).any() tokens_clone[:, step] = self.eos attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty cum_unfin = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) sents_seen = set() for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())): unfin_idx = idx // beam_size sent = unfin_idx + cum_unfin[unfin_idx] sents_seen.add((sent, unfin_idx)) if self.match_source_len and step > src_lengths_no_eos[unfin_idx]: score = -math.inf def get_hypo(): if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i][nonpad_idxs[sent]] _, alignment = hypo_attn.max(dim=0) else: hypo_attn = None alignment = None return { 'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, # src_len x tgt_len 'alignment': alignment, 'positional_scores': pos_scores[i], } if len(finalized[sent]) < beam_size: finalized[sent].append(get_hypo()) newly_finished = [] for sent, unfin_idx in sents_seen: # check termination conditions for this sentence if not finished[sent] and is_finished(sent, step, unfin_idx): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k): """Rescore the top k hypothesis from each beam using noisy channel modeling Returns: new_fw_lprobs: the direct model probabilities after pruning the top k new_ch_lm_lprobs: the combined channel and language model probabilities new_lm_lprobs: the language model probabilities after pruning the top k """ with torch.no_grad(): lprobs_size = lprobs.size() if prefix_tokens is not None and step < prefix_tokens.size(1): probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :] cand_scores = torch.gather( probs_slice, dim=1, index=prefix_tokens[:, step].view(-1, 1).data ).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1) cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1) # need to calculate and save fw and lm probs for prefix tokens fw_top_k = cand_scores fw_top_k_idx = cand_indices k = 1 else: # take the top k best words for every sentence in batch*beam fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k) eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0] ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0) src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype) if self.combine_method != "lm_only": 
temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1) not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index cur_tgt_size = step+2 # add eos to all candidate sentences except those that already end in eos eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1) eos_tokens[eos_idx] = self.tgt_dict.pad_index if step == 0: channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1) else: # move eos from beginning to end of target sentence channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1) ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size)) ch_input_lengths[eos_idx] = cur_tgt_size-1 if self.channel_scoring_type == "unnormalized": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:]) ch_intermed_scores = ch_intermed_scores.float() ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) elif self.channel_scoring_type == "k2_separate": for k_idx in range(k): k_eos_tokens = eos_tokens[k_idx::k, :] if step == 0: k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) else: # move eos from beginning to end of target sentence k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) k_ch_input_lengths = ch_input_lengths[k_idx::k] k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens) k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True) k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2) k_ch_intermed_scores *= not_padding.float() ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1) elif self.channel_scoring_type == "src_vocab": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_lprobs = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab) ch_scores = torch.sum(ch_lprobs, dim=1) elif self.channel_scoring_type == "src_vocab_batched": ch_bsz_size = temp_src_tokens_full.shape[0] ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz)) for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)): end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size) temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :] channel_input_batch = channel_input[start_idx:end_idx, :] ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx] ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch) ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, encoder_out=ch_encoder_output_batch, features_only=True) ch_lprobs_list[i] = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output_batch, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab, start_idx=start_idx, end_idx=end_idx) ch_lprobs = torch.cat(ch_lprobs_list, dim=0) ch_scores = 
torch.sum(ch_lprobs, dim=1) else: ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full) ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True) ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1) ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) else: cur_tgt_size = 0 ch_scores = ch_scores.view(bsz*beam_size, k) expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten() if self.share_tgt_dict: lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k) else: new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm) new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm) lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k) lm_scores.add_(expanded_lm_prefix_scores) ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size) # initialize all as min value new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_fw_lprobs[:, self.pad] = -math.inf new_ch_lm_lprobs[:, self.pad] = -math.inf new_lm_lprobs[:, self.pad] = -math.inf new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k) new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores) new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k)) return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size): if self.channel_scoring_type == "unnormalized": ch_scores = self.log_softmax_fn( ch_scores.view(-1, self.beam_size * self.k2) ).view(ch_scores.shape) ch_scores = ch_scores * self.ch_weight lm_scores1 = lm_scores1 * self.lm_weight if combine_type == "lm_only": # log P(T|S) + log P(T) ch_scores = lm_scores1.view(ch_scores.size()) elif combine_type == "noisy_channel": # 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T) if self.normalize_lm_scores_by_tgt_len: ch_scores.div_(src_size) lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size) ch_scores.add_(lm_scores_norm) # 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T) else: ch_scores.add_(lm_scores1.view(ch_scores.size())) ch_scores.div_(src_size) return ch_scores if self.channel_models is not None: channel_model = self.channel_models[0] # assume only one channel_model model else: channel_model = None lm = EnsembleModel(self.lm_models) lm_incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(lm.models_size) ], ) reorder_state = None batch_idxs = None for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs) reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size) model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state) 
lm.reorder_incremental_state(lm_incremental_states, reorder_state) fw_lprobs, avg_attn_scores = model.forward_decoder( tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature, ) fw_lprobs[:, self.pad] = -math.inf # never select pad fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2) # handle min and max length constraints if step >= max_len: fw_lprobs[:, :self.eos] = -math.inf fw_lprobs[:, self.eos + 1:] = -math.inf elif step < self.min_len: fw_lprobs[:, self.eos] = -math.inf # handle prefix tokens (possibly with different lengths) if prefix_tokens is not None and step < prefix_tokens.size(1): prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_mask = prefix_toks.ne(self.pad) prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) fw_lprobs[prefix_mask] = -math.inf fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs ) prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) ch_lm_lprobs[prefix_mask] = -math.inf ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs ) prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) lm_lprobs[prefix_mask] = -math.inf lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() def replicate_first_beam(tensor, mask): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) # copy tokens, scores and lprobs from the first beam to all beams tokens = replicate_first_beam(tokens, eos_mask_batch_dim) scores = replicate_first_beam(scores, eos_mask_batch_dim) fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim) ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim) lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim) if self.no_repeat_ngram_size > 0: # for each beam and batch sentence, generate a list of previous ngrams gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): gen_tokens = tokens[bbsz_idx].tolist() for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]): gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \ gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]] # Record attention scores if avg_attn_scores is not None: if attn is None: attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2) attn_buf = attn.clone() nonpad_idxs = src_tokens.ne(self.pad) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(fw_lprobs) scores_buf = scores_buf.type_as(fw_lprobs) self.search.set_src_lengths(src_lengths_no_eos) if self.no_repeat_ngram_size > 0: def calculate_banned_tokens(bbsz_idx): # before decoding the next token, prevent decoding of ngrams that have already appeared ngram_index = 
tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist()) return gen_ngrams[bbsz_idx].get(ngram_index, []) if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)] else: banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step( step, fw_lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size), lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos (except for candidates to be ignored) eos_mask = cand_indices.eq(self.eos) eos_mask[:, :beam_size] &= ~cands_to_ignore # only consider eos when it's among the top beam_size indices eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = set() if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size] ) combined_noisy_channel_eos_scores = torch.masked_select( combined_noisy_channel_scores[:, :beam_size], mask=eos_mask[:, :beam_size], ) # finalize hypo using channel model score finalized_sents = finalize_hypos( step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = cand_indices.new_ones(bsz) batch_mask[cand_indices.new(finalized_sents)] = 0 batch_idxs = torch.nonzero(batch_mask).squeeze(-1) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs] fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths_no_eos = src_lengths_no_eos[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) scores_buf.resize_as_(scores) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens_buf.resize_as_(tokens) src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze() if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1) attn_buf.resize_as_(attn) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos or # ignored hypos and values < cand_size indicate candidate # active hypos. After this, the min values per row are the top # candidate active hypos. 
eos_mask[:, :beam_size] |= cands_to_ignore active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just the hypos # with the smallest values in active_mask active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore') torch.topk( active_mask, k=beam_size, dim=1, largest=False, out=(new_cands_to_ignore, active_hypos) ) # update cands_to_ignore to ignore any finalized hypos cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] assert (~cands_to_ignore).any(dim=1).all() active_bbsz_idx = buffer('active_bbsz_idx') torch.gather( cand_bbsz_idx, dim=1, index=active_hypos, out=active_bbsz_idx, ) active_scores = torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores[:, step].view(bsz, beam_size), ) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses torch.index_select( tokens[:, :step + 1], dim=0, index=active_bbsz_idx, out=tokens_buf[:, :step + 1], ) torch.gather( cand_indices, dim=1, index=active_hypos, out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1], ) if step > 0: torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx, out=scores_buf[:, :step], ) torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores_buf.view(bsz, beam_size, -1)[:, :, step], ) torch.gather( lm_lprobs_top_k, dim=1, index=active_hypos, out=lm_prefix_scores.view(bsz, beam_size) ) # copy attention for active hypotheses if attn is not None: torch.index_select( attn[:, :, :step + 2], dim=0, index=active_bbsz_idx, out=attn_buf[:, :, :step + 2], ) # swap buffers tokens, tokens_buf = tokens_buf, tokens scores, scores_buf = scores_buf, scores if attn is not None: attn, attn_buf = attn_buf, attn # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True) return finalized def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k): with torch.no_grad(): lm_lprobs, avg_attn_scores = model.forward_decoder( input_tokens, encoder_outs=None, incremental_states=incremental_states, ) lm_lprobs_size = lm_lprobs.size(0) probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1) return probs_next_wrd def make_dict2dict(old_dict, new_dict): dict2dict_map = {} for sym in old_dict.symbols: dict2dict_map[old_dict.index(sym)] = new_dict.index(sym) return dict2dict_map def dict2dict(tokens, dict2dict_map): if tokens.device == torch.device('cpu'): tokens_tmp = tokens else: tokens_tmp = tokens.cpu() return tokens_tmp.map_( tokens_tmp, lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)] ).to(tokens.device) def reorder_tokens(tokens, lengths, eos): # reorder source tokens so they may be used as reference for P(S|T) return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0) def reorder_all_tokens(tokens, lengths, eos): # used to reorder src tokens from [<pad> <w1> <w2> .. 
<eos>] to [<eos> <w1> <w2>...<pad>] # so source tokens can be used to predict P(S|T) return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)]) def normalized_scores_with_batch_vocab( model_decoder, features, target_ids, k, bsz, beam_size, pad_idx, top_k=0, vocab_size_meter=None, start_idx=None, end_idx=None, **kwargs): """ Get normalized probabilities (or log probs) from a net's output w.r.t. vocab consisting of target IDs in the batch """ if model_decoder.adaptive_softmax is None: weight = model_decoder.output_projection.weight vocab_ids = torch.unique( torch.cat( (torch.unique(target_ids), torch.arange(top_k, device=target_ids.device)) ) ) id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids)))) mapped_target_ids = target_ids.cpu().apply_( lambda x, id_map=id_map: id_map[x] ).to(target_ids.device) expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1) if start_idx is not None and end_idx is not None: expanded_target_ids = expanded_target_ids[start_idx:end_idx, :] logits = F.linear(features, weight[vocab_ids, :]) log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32) intermed_scores = torch.gather( log_softmax[:, :-1, :], 2, expanded_target_ids[:, 1:].unsqueeze(2), ).squeeze() not_padding = expanded_target_ids[:, 1:] != pad_idx intermed_scores *= not_padding.float() return intermed_scores else: raise ValueError("adaptive softmax doesn't work with " + "`normalized_scores_with_batch_vocab()`")
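# --- Illustrative sanity check (not part of the original fairseq file) ---
# A minimal sketch of what `reorder_all_tokens` does, assuming left-padded
# source batches where `lengths` counts the real tokens including <eos>.
# The token ids below (pad=1, eos=2, words=4..7) are made up for illustration.
if __name__ == "__main__":
    import torch

    pad, eos = 1, 2
    src = torch.tensor([
        [pad, pad, 4, 5, eos],   # "w1 w2 <eos>", length 3
        [4, 5, 6, 7, eos],       # "w1 w2 w3 w4 <eos>", length 5
    ])
    lengths = torch.tensor([3, 5])
    # moves <eos> to the front and the padding to the back, so the channel
    # model P(S|T) can score the source left-to-right
    print(reorder_all_tokens(src, lengths, eos))
    # expected:
    # tensor([[2, 4, 5, 1, 1],
    #         [2, 4, 5, 6, 7]])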
data2vec_vision-main
deltalm/src/examples/fast_noisy_channel/noisy_channel_sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.tasks.translation import TranslationTask from fairseq.tasks.language_modeling import LanguageModelingTask from fairseq import checkpoint_utils import argparse from fairseq.tasks import register_task import torch @register_task("noisy_channel_translation") class NoisyChannelTranslation(TranslationTask): """ Rescore the top k candidates from each beam using noisy channel modeling """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" TranslationTask.add_args(parser) # fmt: off parser.add_argument('--channel-model', metavar='FILE', help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.') parser.add_argument('--combine-method', default='lm_only', choices=['lm_only', 'noisy_channel'], help="""method for combining direct and channel model scores. lm_only: decode with P(T|S)P(T) noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""") parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False, help='normalize lm score by target length instead of source length') parser.add_argument('--channel-scoring-type', default='log_norm', choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'], help="Normalize bw scores with log softmax or return bw scores without log softmax") parser.add_argument('--top-k-vocab', default=0, type=int, help='top k vocab IDs to use with `src_vocab` in channel model scoring') parser.add_argument('--k2', default=50, type=int, help='the top k2 candidates to rescore with the noisy channel model for each beam') parser.add_argument('--ch-wt', default=1, type=float, help='weight for the channel model') parser.add_argument('--lm-model', metavar='FILE', help='path to lm model file, to model P(T). P(T) must share the same vocab as the direct model on the target side') parser.add_argument('--lm-data', metavar='FILE', help='path to lm model training data for target language, used to properly load LM with correct dictionary') parser.add_argument('--lm-wt', default=1, type=float, help='the weight of the lm in joint decoding') # fmt: on def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None ): if getattr(args, "score_reference", False): raise NotImplementedError() else: from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator use_cuda = torch.cuda.is_available() and not self.args.cpu assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!' 
assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs' if self.args.channel_model is not None: import copy ch_args_task = copy.deepcopy(self.args) tmp = ch_args_task.source_lang ch_args_task.source_lang = ch_args_task.target_lang ch_args_task.target_lang = tmp ch_args_task._name = 'translation' channel_task = TranslationTask.setup_task(ch_args_task) arg_dict = {} arg_dict['task'] = 'language_modeling' arg_dict['sample_break_mode'] = 'eos' arg_dict['data'] = self.args.lm_data arg_dict['output_dictionary_size'] = -1 lm_args = argparse.Namespace(**arg_dict) lm_task = LanguageModelingTask.setup_task(lm_args) lm_dict = lm_task.output_dictionary if self.args.channel_model is not None: channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task) for model in channel_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() else: channel_models = None lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task) for model in lm_models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if self.args.fp16: model.half() if use_cuda: model.cuda() return NoisyChannelSequenceGenerator( combine_method=self.args.combine_method, tgt_dict=self.target_dictionary, src_dict=self.source_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), normalize_scores=(not getattr(args, 'unnormalized', False)), channel_models=channel_models, k2=getattr(self.args, 'k2', 50), ch_weight=getattr(self.args, 'ch_wt', 1), channel_scoring_type=self.args.channel_scoring_type, top_k_vocab=self.args.top_k_vocab, lm_models=lm_models, lm_dict=lm_dict, lm_weight=getattr(self.args, 'lm_wt', 1), normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False), )
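# --- Illustrative sketch (not part of the original task file) ---
# A hedged, standalone restatement of the `--combine-method` help above, in log
# space. The real combination is performed inside NoisyChannelSequenceGenerator
# and its search strategy; the argument names (src_len, tgt_len) and the exact
# placement of ch_wt / lm_wt below are assumptions made for illustration only.
def _combine_scores_sketch(fw_lprob, ch_lprob, lm_lprob, src_len, tgt_len,
                           combine_method="noisy_channel", ch_wt=1.0, lm_wt=1.0):
    if combine_method == "lm_only":
        # decode with P(T|S) * P(T): add log-probs
        return fw_lprob + lm_wt * lm_lprob
    if combine_method == "noisy_channel":
        # decode with 1/t * log P(T|S) + 1/s * (log P(S|T) + log P(T))
        return fw_lprob / tgt_len + (ch_wt * ch_lprob + lm_wt * lm_lprob) / src_len
    raise ValueError(f"unknown combine_method {combine_method}")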
data2vec_vision-main
deltalm/src/examples/fast_noisy_channel/noisy_channel_translation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import os.path as op from collections import namedtuple from multiprocessing import cpu_count from typing import List, Optional import sentencepiece as sp from fairseq.data.encoders.byte_bpe import ByteBPE from fairseq.data.encoders.byte_utils import byte_encode from fairseq.data.encoders.bytes import Bytes from fairseq.data.encoders.characters import Characters from fairseq.data.encoders.moses_tokenizer import MosesTokenizer from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE SPLITS = ["train", "valid", "test"] def _convert_xml(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: ss = s.strip() if not ss.startswith("<seg"): continue ss = ss.replace("</seg>", "").split('">') assert len(ss) == 2 f_o.write(ss[1].strip() + "\n") def _convert_train(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: ss = s.strip() if ss.startswith("<"): continue f_o.write(ss.strip() + "\n") def _get_bytes(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(Bytes.encode(s.strip()) + "\n") def _get_chars(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(Characters.encode(s.strip()) + "\n") def pretokenize(in_path: str, out_path: str, src: str, tgt: str): Args = namedtuple( "Args", [ "moses_source_lang", "moses_target_lang", "moses_no_dash_splits", "moses_no_escape", ], ) args = Args( moses_source_lang=src, moses_target_lang=tgt, moses_no_dash_splits=False, moses_no_escape=False, ) pretokenizer = MosesTokenizer(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(pretokenizer.encode(s.strip()) + "\n") def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str): with open(out_path, "w") as f_o: for lang in [src, tgt]: with open(f"{in_path_prefix}.{lang}") as f: for s in f: f_o.write(byte_encode(s.strip()) + "\n") def _get_bpe(in_path: str, model_prefix: str, vocab_size: int): arguments = [ f"--input={in_path}", f"--model_prefix={model_prefix}", f"--model_type=bpe", f"--vocab_size={vocab_size}", "--character_coverage=1.0", "--normalization_rule_name=identity", f"--num_threads={cpu_count()}", ] sp.SentencePieceTrainer.Train(" ".join(arguments)) def _apply_bbpe(model_path: str, in_path: str, out_path: str): Args = namedtuple("Args", ["sentencepiece_model_path"]) args = Args(sentencepiece_model_path=model_path) tokenizer = ByteBPE(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + "\n") def _apply_bpe(model_path: str, in_path: str, out_path: str): Args = namedtuple("Args", ["sentencepiece_model"]) args = Args(sentencepiece_model=model_path) tokenizer = SentencepieceBPE(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + "\n") def _concat_files(in_paths: List[str], out_path: str): with open(out_path, "w") as f_o: for p in in_paths: with open(p) as f: for r in f: f_o.write(r) def preprocess_iwslt17( root: str, src: str, tgt: str, bpe_size: Optional[int], need_chars: bool, bbpe_size: Optional[int], need_bytes: bool, ): # extract bitext in_root = op.join(root, f"{src}-{tgt}") for lang in [src, tgt]: _convert_train( op.join(in_root, 
f"train.tags.{src}-{tgt}.{lang}"), op.join(root, f"train.{lang}"), ) _convert_xml( op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"), op.join(root, f"valid.{lang}"), ) _convert_xml( op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"), op.join(root, f"test.{lang}"), ) # pre-tokenize for lang in [src, tgt]: for split in SPLITS: pretokenize( op.join(root, f"{split}.{lang}"), op.join(root, f"{split}.moses.{lang}"), src, tgt, ) # tokenize with BPE vocabulary if bpe_size is not None: # learn vocabulary concated_train_path = op.join(root, "train.all") _concat_files( [op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")], concated_train_path, ) bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}") _get_bpe(concated_train_path, bpe_model_prefix, bpe_size) os.remove(concated_train_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bpe( bpe_model_prefix + ".model", op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"), ) # tokenize with bytes vocabulary if need_bytes: for lang in [src, tgt]: for split in SPLITS: _get_bytes( op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bytes.{lang}"), ) # tokenize with characters vocabulary if need_chars: for lang in [src, tgt]: for split in SPLITS: _get_chars( op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.chars.{lang}"), ) # tokenize with byte-level BPE vocabulary if bbpe_size is not None: # learn vocabulary bchar_path = op.join(root, "train.bchar") _convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path) bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}") _get_bpe(bchar_path, bbpe_model_prefix, bbpe_size) os.remove(bchar_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bbpe( bbpe_model_prefix + ".model", op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"), ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default="data") parser.add_argument( "--bpe-vocab", default=None, type=int, help="Generate tokenized bitext with BPE of size K." "Default to None (disabled).", ) parser.add_argument( "--bbpe-vocab", default=None, type=int, help="Generate tokenized bitext with BBPE of size K." "Default to None (disabled).", ) parser.add_argument( "--byte-vocab", action="store_true", help="Generate tokenized bitext with bytes vocabulary", ) parser.add_argument( "--char-vocab", action="store_true", help="Generate tokenized bitext with chars vocabulary", ) args = parser.parse_args() preprocess_iwslt17( args.root, "fr", "en", args.bpe_vocab, args.char_vocab, args.bbpe_vocab, args.byte_vocab, ) if __name__ == "__main__": main()
data2vec_vision-main
deltalm/src/examples/byte_level_bpe/get_bitext.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerEncoder, TransformerModel @register_model("gru_transformer") class GRUTransformerModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return GRUTransformerEncoder(args, src_dict, embed_tokens) class GRUTransformerEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.emb_ctx = nn.GRU( input_size=embed_tokens.embedding_dim, hidden_size=embed_tokens.embedding_dim // 2, num_layers=1, bidirectional=True, ) def forward_embedding(self, src_tokens): # embed tokens and positions x = embed = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) # contextualize embeddings x = x.transpose(0, 1) x = self.dropout_module(x) x, _ = self.emb_ctx.forward(x) x = x.transpose(0, 1) if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) return x, embed @register_model_architecture("gru_transformer", "gru_transformer") def gru_transformer_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.layer_wise_attention = getattr(args, 
"layer_wise_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) @register_model_architecture("gru_transformer", "gru_transformer_big") def gru_transformer_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) gru_transformer_base_architecture(args)
data2vec_vision-main
deltalm/src/examples/byte_level_bpe/gru_transformer.py
#!/usr/bin/env python3 from setuptools import find_packages, setup setup( name="layoutlmft", version="0.1", author="LayoutLM Team", url="https://github.com/microsoft/unilm/tree/master/layoutlmft", packages=find_packages(), python_requires=">=3.7", extras_require={"dev": ["flake8", "isort", "black"]}, )
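# --- Hedged usage note (not part of the original setup script) ---
# An editable install from the layoutlmft directory is the usual way to use
# this package; the [dev] extra pulls in flake8/isort/black as declared above.
#
#   pip install -e .
#   pip install -e ".[dev]"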
data2vec_vision-main
layoutlmft/setup.py
import os import re import numpy as np from transformers.utils import logging logger = logging.get_logger(__name__) PREFIX_CHECKPOINT_DIR = "checkpoint" _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") def get_last_checkpoint(folder): content = os.listdir(folder) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def re_score(pred_relations, gt_relations, mode="strict"): """Evaluate RE predictions Args: pred_relations (list) : list of list of predicted relations (several relations in each sentence) gt_relations (list) : list of list of ground truth relations rel = { "head": (start_idx (inclusive), end_idx (exclusive)), "tail": (start_idx (inclusive), end_idx (exclusive)), "head_type": ent_type, "tail_type": ent_type, "type": rel_type} vocab (Vocab) : dataset vocabulary mode (str) : in 'strict' or 'boundaries'""" assert mode in ["strict", "boundaries"] relation_types = [v for v in [0, 1] if not v == 0] scores = {rel: {"tp": 0, "fp": 0, "fn": 0} for rel in relation_types + ["ALL"]} # Count GT relations and Predicted relations n_sents = len(gt_relations) n_rels = sum([len([rel for rel in sent]) for sent in gt_relations]) n_found = sum([len([rel for rel in sent]) for sent in pred_relations]) # Count TP, FP and FN per type for pred_sent, gt_sent in zip(pred_relations, gt_relations): for rel_type in relation_types: # strict mode takes argument types into account if mode == "strict": pred_rels = { (rel["head"], rel["head_type"], rel["tail"], rel["tail_type"]) for rel in pred_sent if rel["type"] == rel_type } gt_rels = { (rel["head"], rel["head_type"], rel["tail"], rel["tail_type"]) for rel in gt_sent if rel["type"] == rel_type } # boundaries mode only takes argument spans into account elif mode == "boundaries": pred_rels = {(rel["head"], rel["tail"]) for rel in pred_sent if rel["type"] == rel_type} gt_rels = {(rel["head"], rel["tail"]) for rel in gt_sent if rel["type"] == rel_type} scores[rel_type]["tp"] += len(pred_rels & gt_rels) scores[rel_type]["fp"] += len(pred_rels - gt_rels) scores[rel_type]["fn"] += len(gt_rels - pred_rels) # Compute per entity Precision / Recall / F1 for rel_type in scores.keys(): if scores[rel_type]["tp"]: scores[rel_type]["p"] = scores[rel_type]["tp"] / (scores[rel_type]["fp"] + scores[rel_type]["tp"]) scores[rel_type]["r"] = scores[rel_type]["tp"] / (scores[rel_type]["fn"] + scores[rel_type]["tp"]) else: scores[rel_type]["p"], scores[rel_type]["r"] = 0, 0 if not scores[rel_type]["p"] + scores[rel_type]["r"] == 0: scores[rel_type]["f1"] = ( 2 * scores[rel_type]["p"] * scores[rel_type]["r"] / (scores[rel_type]["p"] + scores[rel_type]["r"]) ) else: scores[rel_type]["f1"] = 0 # Compute micro F1 Scores tp = sum([scores[rel_type]["tp"] for rel_type in relation_types]) fp = sum([scores[rel_type]["fp"] for rel_type in relation_types]) fn = sum([scores[rel_type]["fn"] for rel_type in relation_types]) if tp: precision = tp / (tp + fp) recall = tp / (tp + fn) f1 = 2 * precision * recall / (precision + recall) else: precision, recall, f1 = 0, 0, 0 scores["ALL"]["p"] = precision scores["ALL"]["r"] = recall scores["ALL"]["f1"] = f1 scores["ALL"]["tp"] = tp scores["ALL"]["fp"] = fp scores["ALL"]["fn"] = fn # Compute Macro F1 Scores scores["ALL"]["Macro_f1"] = np.mean([scores[ent_type]["f1"] for ent_type in relation_types]) scores["ALL"]["Macro_p"] 
= np.mean([scores[ent_type]["p"] for ent_type in relation_types]) scores["ALL"]["Macro_r"] = np.mean([scores[ent_type]["r"] for ent_type in relation_types]) logger.info(f"RE Evaluation in *** {mode.upper()} *** mode") logger.info( "processed {} sentences with {} relations; found: {} relations; correct: {}.".format( n_sents, n_rels, n_found, tp ) ) logger.info( "\tALL\t TP: {};\tFP: {};\tFN: {}".format(scores["ALL"]["tp"], scores["ALL"]["fp"], scores["ALL"]["fn"]) ) logger.info("\t\t(m avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (micro)".format(precision, recall, f1)) logger.info( "\t\t(M avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (Macro)\n".format( scores["ALL"]["Macro_p"], scores["ALL"]["Macro_r"], scores["ALL"]["Macro_f1"] ) ) for rel_type in relation_types: logger.info( "\t{}: \tTP: {};\tFP: {};\tFN: {};\tprecision: {:.2f};\trecall: {:.2f};\tf1: {:.2f};\t{}".format( rel_type, scores[rel_type]["tp"], scores[rel_type]["fp"], scores[rel_type]["fn"], scores[rel_type]["p"], scores[rel_type]["r"], scores[rel_type]["f1"], scores[rel_type]["tp"] + scores[rel_type]["fp"], ) ) return scores
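# --- Illustrative sanity check (not part of the original module) ---
# A minimal worked example of `re_score` in strict mode: one sentence with a
# single gold relation of type 1 that the prediction recovers exactly, so
# micro precision / recall / F1 are all 1.0. The spans and the head/tail
# entity types below are made up for illustration.
if __name__ == "__main__":
    rel = {"head": (0, 2), "tail": (3, 5),
           "head_type": "QUESTION", "tail_type": "ANSWER", "type": 1}
    scores = re_score(pred_relations=[[rel]], gt_relations=[[dict(rel)]], mode="strict")
    assert scores["ALL"]["p"] == scores["ALL"]["r"] == scores["ALL"]["f1"] == 1.0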
data2vec_vision-main
layoutlmft/layoutlmft/evaluation.py
from collections import OrderedDict from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_NAMES_MAPPING, TOKENIZER_MAPPING from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, BertConverter, XLMRobertaConverter from transformers.models.auto.modeling_auto import auto_class_factory from .models.layoutlmv2 import ( LayoutLMv2Config, LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast, ) from .models.layoutxlm import ( LayoutXLMConfig, LayoutXLMForRelationExtraction, LayoutXLMForTokenClassification, LayoutXLMTokenizer, LayoutXLMTokenizerFast, ) CONFIG_MAPPING.update([("layoutlmv2", LayoutLMv2Config), ("layoutxlm", LayoutXLMConfig)]) MODEL_NAMES_MAPPING.update([("layoutlmv2", "LayoutLMv2"), ("layoutxlm", "LayoutXLM")]) TOKENIZER_MAPPING.update( [ (LayoutLMv2Config, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast)), (LayoutXLMConfig, (LayoutXLMTokenizer, LayoutXLMTokenizerFast)), ] ) SLOW_TO_FAST_CONVERTERS.update({"LayoutLMv2Tokenizer": BertConverter, "LayoutXLMTokenizer": XLMRobertaConverter}) MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.update( [(LayoutLMv2Config, LayoutLMv2ForTokenClassification), (LayoutXLMConfig, LayoutXLMForTokenClassification)] ) MODEL_FOR_RELATION_EXTRACTION_MAPPING = OrderedDict( [(LayoutLMv2Config, LayoutLMv2ForRelationExtraction), (LayoutXLMConfig, LayoutXLMForRelationExtraction)] ) AutoModelForTokenClassification = auto_class_factory( "AutoModelForTokenClassification", MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, head_doc="token classification" ) AutoModelForRelationExtraction = auto_class_factory( "AutoModelForRelationExtraction", MODEL_FOR_RELATION_EXTRACTION_MAPPING, head_doc="relation extraction" )
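# --- Hedged usage sketch (not part of the original package __init__) ---
# After the mappings above are registered, the auto classes resolve LayoutLMv2 /
# LayoutXLM checkpoints by their config type, along the lines of the snippet
# below. Whether the referenced checkpoint is available locally or on the Hub,
# and the number of labels, are assumptions of this example.
#
#   from transformers import AutoTokenizer
#   from layoutlmft import AutoModelForTokenClassification
#
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
#   model = AutoModelForTokenClassification.from_pretrained(
#       "microsoft/layoutlmv2-base-uncased", num_labels=7)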
data2vec_vision-main
layoutlmft/layoutlmft/__init__.py
from dataclasses import dataclass from typing import Dict, Optional, Tuple import torch from transformers.file_utils import ModelOutput @dataclass class ReOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None entities: Optional[Dict] = None relations: Optional[Dict] = None pred_relations: Optional[Dict] = None
data2vec_vision-main
layoutlmft/layoutlmft/utils.py
data2vec_vision-main
layoutlmft/layoutlmft/models/__init__.py
from dataclasses import dataclass, field from typing import Optional @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " "with private models)." }, )
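# --- Hedged usage sketch (not part of the original module) ---
# These fields are meant to be parsed with transformers' HfArgumentParser,
# typically together with data and training argument dataclasses in the
# fine-tuning scripts; the exact pairing below is an assumption of this example.
#
#   from transformers import HfArgumentParser, TrainingArguments
#
#   parser = HfArgumentParser((ModelArguments, TrainingArguments))
#   model_args, training_args = parser.parse_args_into_dataclasses()
#   print(model_args.model_name_or_path, training_args.output_dir)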
data2vec_vision-main
layoutlmft/layoutlmft/models/model_args.py
# coding=utf-8 from transformers.models.layoutlm.tokenization_layoutlm import LayoutLMTokenizer from transformers.utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt", "microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/layoutlmv2-base-uncased": 512, "microsoft/layoutlmv2-large-uncased": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/layoutlmv2-base-uncased": {"do_lower_case": True}, "microsoft/layoutlmv2-large-uncased": {"do_lower_case": True}, } class LayoutLMv2Tokenizer(LayoutLMTokenizer): r""" Constructs a LayoutLMv2 tokenizer. :class:`~transformers.LayoutLMv2Tokenizer is identical to :class:`~transformers.BertTokenizer` and runs end-to-end tokenization: punctuation splitting + wordpiece. Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, model_max_length=512, **kwargs): super().__init__(model_max_length=model_max_length, **kwargs)
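# --- Hedged usage sketch (not part of the original module) ---
# The class above only changes naming and defaults relative to LayoutLMTokenizer,
# so it behaves like a lower-cased BERT WordPiece tokenizer with
# model_max_length=512; bounding boxes are not produced by this tokenizer.
#
#   tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
#   enc = tokenizer("Invoice number: 12345", return_tensors="pt")
#   # enc holds input_ids / token_type_ids / attention_mask only.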
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2.py
from .configuration_layoutlmv2 import LayoutLMv2Config from .modeling_layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutlmv2/__init__.py
# -*- coding: utf-8 -*- def add_layoutlmv2_config(cfg): _C = cfg # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C.MODEL.MASK_ON = True # When using pre-trained models in Detectron1 or any MSRA models, # std has been absorbed into its conv1 weights, so the std needs to be set 1. # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) _C.MODEL.PIXEL_STD = [57.375, 57.120, 58.395] # ---------------------------------------------------------------------------- # # Backbone options # ---------------------------------------------------------------------------- # _C.MODEL.BACKBONE.NAME = "build_resnet_fpn_backbone" # ---------------------------------------------------------------------------- # # FPN options # ---------------------------------------------------------------------------- # # Names of the input feature maps to be used by FPN # They must have contiguous power of 2 strides # e.g., ["res2", "res3", "res4", "res5"] _C.MODEL.FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"] # ---------------------------------------------------------------------------- # # Anchor generator options # ---------------------------------------------------------------------------- # # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input. # Format: list[list[float]]. SIZES[i] specifies the list of sizes # to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true, # or len(SIZES) == 1 is true and size list SIZES[0] is used for all # IN_FEATURES. _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32], [64], [128], [256], [512]] # ---------------------------------------------------------------------------- # # RPN options # ---------------------------------------------------------------------------- # # Names of the input feature maps to be used by RPN # e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN _C.MODEL.RPN.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6"] # Number of top scoring RPN proposals to keep before applying NMS # When FPN is used, this is *per FPN level* (not total) _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000 _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 1000 # Number of top scoring RPN proposals to keep after applying NMS # When FPN is used, this limit is applied per level and then again to the union # of proposals from all levels # NOTE: When FPN is used, the meaning of this config is different from Detectron1. # It means per-batch topk in Detectron1, but per-image topk here. # See the "find_top_rpn_proposals" function for details. _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1000 _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 # ---------------------------------------------------------------------------- # # ROI HEADS options # ---------------------------------------------------------------------------- # _C.MODEL.ROI_HEADS.NAME = "StandardROIHeads" # Number of foreground classes _C.MODEL.ROI_HEADS.NUM_CLASSES = 5 # Names of the input feature maps to be used by ROI heads # Currently all heads (box, mask, ...) 
use the same input feature map list # e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN _C.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3", "p4", "p5"] # ---------------------------------------------------------------------------- # # Box Head # ---------------------------------------------------------------------------- # # C4 don't use head name option # Options for non-C4 models: FastRCNNConvFCHead, _C.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" _C.MODEL.ROI_BOX_HEAD.NUM_FC = 2 _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 # ---------------------------------------------------------------------------- # # Mask Head # ---------------------------------------------------------------------------- # _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 4 # The number of convs in the mask head _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 7 # ---------------------------------------------------------------------------- # # ResNe[X]t options (ResNets = {ResNet, ResNeXt} # Note that parts of a resnet may be used for both the backbone and the head # These options apply to both # ---------------------------------------------------------------------------- # _C.MODEL.RESNETS.DEPTH = 101 _C.MODEL.RESNETS.SIZES = [[32], [64], [128], [256], [512]] _C.MODEL.RESNETS.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] _C.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"] # res4 for C4 backbone, res2..5 for FPN backbone # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt _C.MODEL.RESNETS.NUM_GROUPS = 32 # Baseline width of each group. # Scaling this parameters will scale the width of all bottleneck layers. _C.MODEL.RESNETS.WIDTH_PER_GROUP = 8 # Place the stride 2 conv on the 1x1 filter # Use True only for the original MSRA ResNet; use False for C2 and Torch models _C.MODEL.RESNETS.STRIDE_IN_1X1 = False
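# --- Hedged usage sketch (not part of the original module) ---
# This helper is applied to a fresh detectron2 config, the same way
# VisualBackbone does in modeling_layoutlmv2.py; options it does not override
# keep detectron2's built-in defaults.
#
#   import detectron2.config
#
#   cfg = detectron2.config.get_cfg()
#   add_layoutlmv2_config(cfg)
#   print(cfg.MODEL.RESNETS.DEPTH)           # 101
#   print(cfg.MODEL.RESNETS.NUM_GROUPS,      # 32 groups x 8 channels per group,
#         cfg.MODEL.RESNETS.WIDTH_PER_GROUP) # i.e. a ResNeXt-101 32x8d backbone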
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py
# coding=utf-8 import math import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss import detectron2 from detectron2.modeling import META_ARCH_REGISTRY from transformers import PreTrainedModel from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, TokenClassifierOutput, ) from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput from transformers.utils import logging from ...modules.decoders.re import REDecoder from ...utils import ReOutput from .configuration_layoutlmv2 import LayoutLMv2Config from .detectron2_config import add_layoutlmv2_config logger = logging.get_logger(__name__) LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "layoutlmv2-base-uncased", "layoutlmv2-large-uncased", ] LayoutLMv2LayerNorm = torch.nn.LayerNorm class LayoutLMv2Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super(LayoutLMv2Embeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def _cal_spatial_position_embeddings(self, bbox): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) return spatial_position_embeddings class LayoutLMv2SelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size 
({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.fast_qkv = config.fast_qkv self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if config.fast_qkv: self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False) self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size)) self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size)) else: self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def compute_qkv(self, hidden_states): if self.fast_qkv: qkv = self.qkv_linear(hidden_states) q, k, v = torch.chunk(qkv, 3, dim=-1) if q.ndimension() == self.q_bias.ndimension(): q = q + self.q_bias v = v + self.v_bias else: _sz = (1,) * (q.ndimension() - 1) + (-1,) q = q + self.q_bias.view(*_sz) v = v + self.v_bias.view(*_sz) else: q = self.query(hidden_states) k = self.key(hidden_states) v = self.value(hidden_states) return q, k, v def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): q, k, v = self.compute_qkv(hidden_states) # (B, L, H*D) -> (B, H, L, D) query_layer = self.transpose_for_scores(q) key_layer = self.transpose_for_scores(k) value_layer = self.transpose_for_scores(v) query_layer = query_layer / math.sqrt(self.attention_head_size) # [BSZ, NAT, L, L] attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.has_relative_attention_bias: attention_scores += rel_pos if self.has_spatial_attention_bias: attention_scores += rel_2d_pos attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf")) attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class LayoutLMv2Attention(nn.Module): def __init__(self, config): super().__init__() self.self = LayoutLMv2SelfAttention(config) self.output = LayoutLMv2SelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class LayoutLMv2Layer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LayoutLMv2Attention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = LayoutLMv2Attention(config) self.intermediate = LayoutLMv2Intermediate(config) self.output = LayoutLMv2Output(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated 
with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ret = 0 if bidirectional: num_buckets //= 2 ret += (relative_position > 0).long() * num_buckets n = torch.abs(relative_position) else: n = torch.max(-relative_position, torch.zeros_like(relative_position)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret class LayoutLMv2Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)]) self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if self.has_relative_attention_bias: self.rel_pos_bins = config.rel_pos_bins self.max_rel_pos = config.max_rel_pos self.rel_pos_onehot_size = config.rel_pos_bins self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False) if self.has_spatial_attention_bias: self.max_rel_2d_pos = config.max_rel_2d_pos self.rel_2d_pos_bins = config.rel_2d_pos_bins self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False) self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False) def _cal_1d_pos_emb(self, hidden_states, position_ids): rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1) rel_pos = relative_position_bucket( rel_pos_mat, num_buckets=self.rel_pos_bins, max_distance=self.max_rel_pos, ) rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states) rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2) rel_pos = rel_pos.contiguous() return rel_pos def _cal_2d_pos_emb(self, 
hidden_states, bbox): position_coord_x = bbox[:, :, 0] position_coord_y = bbox[:, :, 3] rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1) rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1) rel_pos_x = relative_position_bucket( rel_pos_x_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_y = relative_position_bucket( rel_pos_y_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states) rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states) rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2) rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2) rel_pos_x = rel_pos_x.contiguous() rel_pos_y = rel_pos_y.contiguous() rel_2d_pos = rel_pos_x + rel_pos_y return rel_2d_pos def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, bbox=None, position_ids=None, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class LayoutLMv2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LayoutLMv2Config pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST base_model_prefix = "layoutlmv2" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayoutLMv2LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def my_convert_sync_batchnorm(module, process_group=None): # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d` if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group) module_output = module if isinstance(module, detectron2.layers.FrozenBatchNorm2d): module_output = torch.nn.SyncBatchNorm( num_features=module.num_features, eps=module.eps, affine=True, track_running_stats=True, process_group=process_group, ) module_output.weight = torch.nn.Parameter(module.weight) module_output.bias = torch.nn.Parameter(module.bias) module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device) for name, child in module.named_children(): module_output.add_module(name, my_convert_sync_batchnorm(child, process_group)) del module return module_output class VisualBackbone(nn.Module): def __init__(self, config): super().__init__() self.cfg = detectron2.config.get_cfg() add_layoutlmv2_config(self.cfg) 
meta_arch = self.cfg.MODEL.META_ARCHITECTURE model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg) assert isinstance(model.backbone, detectron2.modeling.backbone.FPN) self.backbone = model.backbone if ( config.convert_sync_batchnorm and torch.distributed.is_available() and torch.distributed.is_initialized() and torch.distributed.get_rank() > -1 ): self_rank = torch.distributed.get_rank() node_size = torch.cuda.device_count() world_size = torch.distributed.get_world_size() assert world_size % node_size == 0 node_global_ranks = [ list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size) ] sync_bn_groups = [ torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size) ] node_rank = self_rank // node_size assert self_rank in node_global_ranks[node_rank] self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank]) assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD) num_channels = len(self.cfg.MODEL.PIXEL_MEAN) self.register_buffer( "pixel_mean", torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1), ) self.register_buffer("pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1)) self.out_feature_key = "p2" if torch.is_deterministic(): logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`") input_shape = (224, 224) backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride self.pool = nn.AvgPool2d( ( math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]), math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]), ) ) else: self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2]) if len(config.image_feature_pool_shape) == 2: config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels) assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2] def forward(self, images): images_input = (images.tensor - self.pixel_mean) / self.pixel_std features = self.backbone(images_input) features = features[self.out_feature_key] features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous() return features class LayoutLMv2Model(LayoutLMv2PreTrainedModel): def __init__(self, config): super(LayoutLMv2Model, self).__init__(config) self.config = config self.has_visual_segment_embedding = config.has_visual_segment_embedding self.embeddings = LayoutLMv2Embeddings(config) self.visual = VisualBackbone(config) self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size) if self.has_visual_segment_embedding: self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0]) self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.visual_dropout = nn.Dropout(config.hidden_dropout_prob) self.encoder = LayoutLMv2Encoder(config) self.pooler = LayoutLMv2Pooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids): seq_length = input_ids.size(1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.embeddings.word_embeddings(input_ids) position_embeddings = self.embeddings.position_embeddings(position_ids) spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox) token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + spatial_position_embeddings + token_type_embeddings embeddings = self.embeddings.LayerNorm(embeddings) embeddings = self.embeddings.dropout(embeddings) return embeddings def _calc_img_embeddings(self, image, bbox, position_ids): visual_embeddings = self.visual_proj(self.visual(image)) position_embeddings = self.embeddings.position_embeddings(position_ids) spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox) embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings if self.has_visual_segment_embedding: embeddings += self.visual_segment_embedding embeddings = self.visual_LayerNorm(embeddings) embeddings = self.visual_dropout(embeddings) return embeddings def forward( self, input_ids=None, bbox=None, image=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device visual_shape = list(input_shape) visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1] visual_shape = torch.Size(visual_shape) final_shape = list(input_shape) final_shape[1] += visual_shape[1] final_shape = torch.Size(final_shape) visual_bbox_x = ( torch.arange( 0, 1000 * (self.config.image_feature_pool_shape[1] + 1), 1000, device=device, dtype=bbox.dtype, ) // self.config.image_feature_pool_shape[1] ) visual_bbox_y = ( torch.arange( 0, 1000 * (self.config.image_feature_pool_shape[0] + 1), 1000, device=device, dtype=bbox.dtype, ) // self.config.image_feature_pool_shape[0] ) visual_bbox = torch.stack( [ visual_bbox_x[:-1].repeat(self.config.image_feature_pool_shape[0], 1), visual_bbox_y[:-1].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1), visual_bbox_x[1:].repeat(self.config.image_feature_pool_shape[0], 1), 
visual_bbox_y[1:].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1), ], dim=-1, ).view(-1, bbox.size(-1)) visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1) final_bbox = torch.cat([bbox, visual_bbox], dim=1) if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) visual_attention_mask = torch.ones(visual_shape, device=device) final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if position_ids is None: seq_length = input_shape[1] position_ids = self.embeddings.position_ids[:, :seq_length] position_ids = position_ids.expand_as(input_ids) visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat( input_shape[0], 1 ) final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) text_layout_emb = self._calc_text_embeddings( input_ids=input_ids, bbox=bbox, token_type_ids=token_type_ids, position_ids=position_ids, ) visual_emb = self._calc_img_embeddings( image=image, bbox=visual_bbox, position_ids=visual_position_ids, ) final_emb = torch.cat([text_layout_emb, visual_emb], dim=1) extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( final_emb, extended_attention_mask, bbox=final_bbox, position_ids=final_position_ids, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv2 = LayoutLMv2Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings def forward( self, input_ids=None, bbox=None, image=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv2( input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, 
inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) seq_length = input_ids.size(1) sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LayoutLMv2ForRelationExtraction(LayoutLMv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.layoutlmv2 = LayoutLMv2Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.extractor = REDecoder(config) self.init_weights() def forward( self, input_ids, bbox, labels=None, image=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, entities=None, relations=None, ): outputs = self.layoutlmv2( input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, ) seq_length = input_ids.size(1) sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:] sequence_output = self.dropout(sequence_output) loss, pred_relations = self.extractor(sequence_output, entities, relations) return ReOutput( loss=loss, entities=entities, relations=relations, pred_relations=pred_relations, hidden_states=outputs[0], )
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutlmv2/modeling_layoutlmv2.py
# coding=utf-8 from transformers.models.layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast from transformers.utils import logging from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt", "microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt", }, "tokenizer_file": { "microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/tokenizer.json", "microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/layoutlmv2-base-uncased": 512, "microsoft/layoutlmv2-large-uncased": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/layoutlmv2-base-uncased": {"do_lower_case": True}, "microsoft/layoutlmv2-large-uncased": {"do_lower_case": True}, } class LayoutLMv2TokenizerFast(LayoutLMTokenizerFast): r""" Constructs a "Fast" LayoutLMv2Tokenizer. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = LayoutLMv2Tokenizer def __init__(self, model_max_length=512, **kwargs): super().__init__(model_max_length=model_max_length, **kwargs)
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2_fast.py
# coding=utf-8 from transformers.models.layoutlm.configuration_layoutlm import LayoutLMConfig from transformers.utils import logging logger = logging.get_logger(__name__) LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = { "layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/config.json", "layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/config.json", } class LayoutLMv2Config(LayoutLMConfig): model_type = "layoutlmv2" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, gradient_checkpointing=False, max_2d_position_embeddings=1024, max_rel_pos=128, rel_pos_bins=32, fast_qkv=True, max_rel_2d_pos=256, rel_2d_pos_bins=64, convert_sync_batchnorm=True, image_feature_pool_shape=[7, 7, 256], coordinate_size=128, shape_size=128, has_relative_attention_bias=True, has_spatial_attention_bias=True, has_visual_segment_embedding=False, **kwargs ): super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, gradient_checkpointing=gradient_checkpointing, **kwargs, ) self.max_2d_position_embeddings = max_2d_position_embeddings self.max_rel_pos = max_rel_pos self.rel_pos_bins = rel_pos_bins self.fast_qkv = fast_qkv self.max_rel_2d_pos = max_rel_2d_pos self.rel_2d_pos_bins = rel_2d_pos_bins self.convert_sync_batchnorm = convert_sync_batchnorm self.image_feature_pool_shape = image_feature_pool_shape self.coordinate_size = coordinate_size self.shape_size = shape_size self.has_relative_attention_bias = has_relative_attention_bias self.has_spatial_attention_bias = has_spatial_attention_bias self.has_visual_segment_embedding = has_visual_segment_embedding
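# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of constructing the config defined above and reading a few
# of its layout-specific fields; the override values here are arbitrary and only
# meant to show the keyword names.
if __name__ == "__main__":
    config = LayoutLMv2Config(image_feature_pool_shape=[7, 7, 256], rel_pos_bins=32)
    print(config.model_type)                  # "layoutlmv2"
    print(config.max_2d_position_embeddings)  # 1024 by default
    print(config.has_relative_attention_bias, config.has_spatial_attention_bias)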
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutlmv2/configuration_layoutlmv2.py
# coding=utf-8 from transformers.utils import logging from ..layoutlmv2 import LayoutLMv2Config logger = logging.get_logger(__name__) LAYOUTXLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { "layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/config.json", "layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/config.json", } class LayoutXLMConfig(LayoutLMv2Config): model_type = "layoutxlm"
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutxlm/configuration_layoutxlm.py
# coding=utf-8 from transformers import XLMRobertaTokenizerFast from transformers.file_utils import is_sentencepiece_available from transformers.utils import logging if is_sentencepiece_available(): from .tokenization_layoutxlm import LayoutXLMTokenizer else: LayoutXLMTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/sentencepiece.bpe.model", "layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/tokenizer.json", "layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "layoutxlm-base": 512, "layoutxlm-large": 512, } class LayoutXLMTokenizerFast(XLMRobertaTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = LayoutXLMTokenizer def __init__(self, model_max_length=512, **kwargs): super().__init__(model_max_length=model_max_length, **kwargs)
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutxlm/tokenization_layoutxlm_fast.py
# coding=utf-8 from transformers import XLMRobertaTokenizer from transformers.utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/sentencepiece.bpe.model", "layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/sentencepiece.bpe.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "layoutxlm-base": 512, "layoutxlm-large": 512, } class LayoutXLMTokenizer(XLMRobertaTokenizer): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__(self, model_max_length=512, **kwargs): super().__init__(model_max_length=model_max_length, **kwargs)
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutxlm/tokenization_layoutxlm.py
from .configuration_layoutxlm import LayoutXLMConfig from .modeling_layoutxlm import LayoutXLMForRelationExtraction, LayoutXLMForTokenClassification, LayoutXLMModel from .tokenization_layoutxlm import LayoutXLMTokenizer from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutxlm/__init__.py
# coding=utf-8 from transformers.utils import logging from ..layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model from .configuration_layoutxlm import LayoutXLMConfig logger = logging.get_logger(__name__) LAYOUTXLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "layoutxlm-base", "layoutxlm-large", ] class LayoutXLMModel(LayoutLMv2Model): config_class = LayoutXLMConfig class LayoutXLMForTokenClassification(LayoutLMv2ForTokenClassification): config_class = LayoutXLMConfig class LayoutXLMForRelationExtraction(LayoutLMv2ForRelationExtraction): config_class = LayoutXLMConfig
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutxlm/modeling_layoutxlm.py
from transformers.models.layoutlm import *
data2vec_vision-main
layoutlmft/layoutlmft/models/layoutlm/__init__.py
import collections import time from typing import Any, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import nn from torch.utils.data import DataLoader, Dataset from transformers.trainer_utils import EvalPrediction, PredictionOutput, speed_metrics from transformers.utils import logging from .funsd_trainer import FunsdTrainer if version.parse(torch.__version__) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.get_logger(__name__) class XfunSerTrainer(FunsdTrainer): pass class XfunReTrainer(FunsdTrainer): def __init__(self, **kwargs): super().__init__(**kwargs) self.label_names.append("relations") def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: inputs = self._prepare_inputs(inputs) with torch.no_grad(): if self.use_amp: with autocast(): outputs = model(**inputs) else: outputs = model(**inputs) labels = tuple(inputs.get(name) for name in self.label_names) return outputs, labels def prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> PredictionOutput: """ Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`. Works both with or without labels. """ if not isinstance(dataloader.dataset, collections.abc.Sized): raise ValueError("dataset must implement __len__") prediction_loss_only = ( prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only ) if self.args.deepspeed and not self.args.do_train: # no harm, but flagging to the user that deepspeed config is ignored for eval # flagging only for when --do_train wasn't passed as only then it's redundant logger.info("Detected the deepspeed argument but it will not be used for evaluation") model = self._wrap_model(self.model, training=False) # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while # ``train`` is running, half it first and then put on device if not self.is_in_train and self.args.fp16_full_eval: model = model.half().to(self.args.device) batch_size = dataloader.batch_size num_examples = self.num_examples(dataloader) logger.info("***** Running %s *****", description) logger.info(" Num examples = %d", num_examples) logger.info(" Batch size = %d", batch_size) model.eval() self.callback_handler.eval_dataloader = dataloader re_labels = None pred_relations = None entities = None for step, inputs in enumerate(dataloader): outputs, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) re_labels = labels[1] if re_labels is None else re_labels + labels[1] pred_relations = ( outputs.pred_relations if pred_relations is None else pred_relations + outputs.pred_relations ) entities = outputs.entities if entities is None else entities + outputs.entities self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control) gt_relations = [] for b in range(len(re_labels)): rel_sent = [] for head, tail in zip(re_labels[b]["head"], re_labels[b]["tail"]): rel = {} rel["head_id"] = head rel["head"] = (entities[b]["start"][rel["head_id"]], entities[b]["end"][rel["head_id"]]) rel["head_type"] = entities[b]["label"][rel["head_id"]] rel["tail_id"] = tail rel["tail"] = 
(entities[b]["start"][rel["tail_id"]], entities[b]["end"][rel["tail_id"]]) rel["tail_type"] = entities[b]["label"][rel["tail_id"]] rel["type"] = 1 rel_sent.append(rel) gt_relations.append(rel_sent) re_metrics = self.compute_metrics(EvalPrediction(predictions=pred_relations, label_ids=gt_relations)) re_metrics = { "precision": re_metrics["ALL"]["p"], "recall": re_metrics["ALL"]["r"], "f1": re_metrics["ALL"]["f1"], } re_metrics[f"{metric_key_prefix}_loss"] = outputs.loss.mean().item() metrics = {} # # Prefix all keys with metric_key_prefix + '_' for key in list(re_metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = re_metrics.pop(key) else: metrics[f"{key}"] = re_metrics.pop(key) return metrics def evaluate( self, eval_dataset: Optional[Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init :obj:`compute_metrics` argument). You can also subclass and override this method to inject custom behavior. Args: eval_dataset (:obj:`Dataset`, `optional`): Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the :obj:`__len__` method. ignore_keys (:obj:`Lst[str]`, `optional`): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is "eval" (default) Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. """ if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") self.args.local_rank = -1 eval_dataloader = self.get_eval_dataloader(eval_dataset) self.args.local_rank = torch.distributed.get_rank() start_time = time.time() metrics = self.prediction_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if self.compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, ) n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset) metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples)) self.log(metrics) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) return metrics
data2vec_vision-main
layoutlmft/layoutlmft/trainers/xfun_trainer.py
from .funsd_trainer import FunsdTrainer from .xfun_trainer import XfunReTrainer, XfunSerTrainer
data2vec_vision-main
layoutlmft/layoutlmft/trainers/__init__.py
from typing import Any, Dict, Union import torch from transformers import Trainer class FunsdTrainer(Trainer): def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: """ Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and handling potential state. """ for k, v in inputs.items(): if hasattr(v, "to") and hasattr(v, "device"): inputs[k] = v.to(self.args.device) if self.args.past_index >= 0 and self._past is not None: inputs["mems"] = self._past return inputs
data2vec_vision-main
layoutlmft/layoutlmft/trainers/funsd_trainer.py
data2vec_vision-main
layoutlmft/layoutlmft/modules/__init__.py
data2vec_vision-main
layoutlmft/layoutlmft/modules/decoders/__init__.py
import copy import torch from torch import nn from torch.nn import CrossEntropyLoss class BiaffineAttention(torch.nn.Module): """Implements a biaffine attention operator for binary relation classification. PyTorch implementation of the biaffine attention operator from "End-to-end neural relation extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275) which can be used as a classifier for binary relation classification. Args: in_features (int): The size of the feature dimension of the inputs. out_features (int): The size of the feature dimension of the output. Shape: - x_1: `(N, *, in_features)` where `N` is the batch dimension and `*` means any number of additional dimensisons. - x_2: `(N, *, in_features)`, where `N` is the batch dimension and `*` means any number of additional dimensions. - Output: `(N, *, out_features)`, where `N` is the batch dimension and `*` means any number of additional dimensions. Examples: >>> batch_size, in_features, out_features = 32, 100, 4 >>> biaffine_attention = BiaffineAttention(in_features, out_features) >>> x_1 = torch.randn(batch_size, in_features) >>> x_2 = torch.randn(batch_size, in_features) >>> output = biaffine_attention(x_1, x_2) >>> print(output.size()) torch.Size([32, 4]) """ def __init__(self, in_features, out_features): super(BiaffineAttention, self).__init__() self.in_features = in_features self.out_features = out_features self.bilinear = torch.nn.Bilinear(in_features, in_features, out_features, bias=False) self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True) self.reset_parameters() def forward(self, x_1, x_2): return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1)) def reset_parameters(self): self.bilinear.reset_parameters() self.linear.reset_parameters() class REDecoder(nn.Module): def __init__(self, config): super().__init__() self.entity_emb = nn.Embedding(3, config.hidden_size, scale_grad_by_freq=True) projection = nn.Sequential( nn.Linear(config.hidden_size * 2, config.hidden_size), nn.ReLU(), nn.Dropout(config.hidden_dropout_prob), nn.Linear(config.hidden_size, config.hidden_size // 2), nn.ReLU(), nn.Dropout(config.hidden_dropout_prob), ) self.ffnn_head = copy.deepcopy(projection) self.ffnn_tail = copy.deepcopy(projection) self.rel_classifier = BiaffineAttention(config.hidden_size // 2, 2) self.loss_fct = CrossEntropyLoss() def build_relation(self, relations, entities): batch_size = len(relations) new_relations = [] for b in range(batch_size): if len(entities[b]["start"]) <= 2: entities[b] = {"end": [1, 1], "label": [0, 0], "start": [0, 0]} all_possible_relations = set( [ (i, j) for i in range(len(entities[b]["label"])) for j in range(len(entities[b]["label"])) if entities[b]["label"][i] == 1 and entities[b]["label"][j] == 2 ] ) if len(all_possible_relations) == 0: all_possible_relations = set([(0, 1)]) positive_relations = set(list(zip(relations[b]["head"], relations[b]["tail"]))) negative_relations = all_possible_relations - positive_relations positive_relations = set([i for i in positive_relations if i in all_possible_relations]) reordered_relations = list(positive_relations) + list(negative_relations) relation_per_doc = {"head": [], "tail": [], "label": []} relation_per_doc["head"] = [i[0] for i in reordered_relations] relation_per_doc["tail"] = [i[1] for i in reordered_relations] relation_per_doc["label"] = [1] * len(positive_relations) + [0] * ( len(reordered_relations) - len(positive_relations) ) assert len(relation_per_doc["head"]) != 0 
new_relations.append(relation_per_doc) return new_relations, entities def get_predicted_relations(self, logits, relations, entities): pred_relations = [] for i, pred_label in enumerate(logits.argmax(-1)): if pred_label != 1: continue rel = {} rel["head_id"] = relations["head"][i] rel["head"] = (entities["start"][rel["head_id"]], entities["end"][rel["head_id"]]) rel["head_type"] = entities["label"][rel["head_id"]] rel["tail_id"] = relations["tail"][i] rel["tail"] = (entities["start"][rel["tail_id"]], entities["end"][rel["tail_id"]]) rel["tail_type"] = entities["label"][rel["tail_id"]] rel["type"] = 1 pred_relations.append(rel) return pred_relations def forward(self, hidden_states, entities, relations): batch_size, max_n_words, context_dim = hidden_states.size() device = hidden_states.device relations, entities = self.build_relation(relations, entities) loss = 0 all_pred_relations = [] for b in range(batch_size): head_entities = torch.tensor(relations[b]["head"], device=device) tail_entities = torch.tensor(relations[b]["tail"], device=device) relation_labels = torch.tensor(relations[b]["label"], device=device) entities_start_index = torch.tensor(entities[b]["start"], device=device) entities_labels = torch.tensor(entities[b]["label"], device=device) head_index = entities_start_index[head_entities] head_label = entities_labels[head_entities] head_label_repr = self.entity_emb(head_label) tail_index = entities_start_index[tail_entities] tail_label = entities_labels[tail_entities] tail_label_repr = self.entity_emb(tail_label) head_repr = torch.cat( (hidden_states[b][head_index], head_label_repr), dim=-1, ) tail_repr = torch.cat( (hidden_states[b][tail_index], tail_label_repr), dim=-1, ) heads = self.ffnn_head(head_repr) tails = self.ffnn_tail(tail_repr) logits = self.rel_classifier(heads, tails) loss += self.loss_fct(logits, relation_labels) pred_relations = self.get_predicted_relations(logits, relations[b], entities[b]) all_pred_relations.append(pred_relations) return loss, all_pred_relations
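# --- Illustrative usage sketch (not part of the original file) ---
# A toy run of REDecoder on random hidden states, mainly to document the expected
# `entities` / `relations` input format. The config is a stand-in namespace rather
# than a real LayoutLMv2Config, and all sizes and indices are arbitrary.
if __name__ == "__main__":
    from types import SimpleNamespace

    toy_config = SimpleNamespace(hidden_size=64, hidden_dropout_prob=0.1)
    decoder = REDecoder(toy_config)

    hidden_states = torch.randn(1, 8, toy_config.hidden_size)
    # One document with three entities: entity 1 is a question (label 1),
    # entity 2 is an answer (label 2); the gold relation links them.
    entities = [{"start": [0, 2, 4], "end": [2, 4, 6], "label": [0, 1, 2]}]
    relations = [{"head": [1], "tail": [2]}]

    loss, pred_relations = decoder(hidden_states, entities, relations)
    print(loss.item(), pred_relations)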
data2vec_vision-main
layoutlmft/layoutlmft/modules/decoders/re.py
# flake8: noqa from .data_collator import DataCollatorForKeyValueExtraction from .datasets import *
data2vec_vision-main
layoutlmft/layoutlmft/data/__init__.py
import torch from detectron2.data.detection_utils import read_image from detectron2.data.transforms import ResizeTransform, TransformList def normalize_bbox(bbox, size): return [ int(1000 * bbox[0] / size[0]), int(1000 * bbox[1] / size[1]), int(1000 * bbox[2] / size[0]), int(1000 * bbox[3] / size[1]), ] def simplify_bbox(bbox): return [ min(bbox[0::2]), min(bbox[1::2]), max(bbox[2::2]), max(bbox[3::2]), ] def merge_bbox(bbox_list): x0, y0, x1, y1 = list(zip(*bbox_list)) return [min(x0), min(y0), max(x1), max(y1)] def load_image(image_path): image = read_image(image_path, format="BGR") h = image.shape[0] w = image.shape[1] img_trans = TransformList([ResizeTransform(h=h, w=w, new_h=224, new_w=224)]) image = torch.tensor(img_trans.apply_image(image).copy()).permute(2, 0, 1) # copy to make it writeable return image, (w, h)
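# --- Illustrative usage sketch (not part of the original file) ---
# The bbox helpers above are pure functions, so they can be exercised without an
# image or detectron2 objects; the page size and word boxes below are made up.
if __name__ == "__main__":
    page_size = (200, 100)  # (width, height) of a hypothetical page
    word_boxes = [[10, 20, 30, 40], [25, 35, 60, 45]]
    merged = merge_bbox(word_boxes)           # -> [10, 20, 60, 45]
    print(normalize_bbox(merged, page_size))  # -> [50, 200, 300, 450]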
data2vec_vision-main
layoutlmft/layoutlmft/data/utils.py
from dataclasses import dataclass, field from typing import Optional @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) pad_to_max_length: bool = field( default=True, metadata={ "help": "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." }, ) label_all_tokens: bool = field( default=False, metadata={ "help": "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) @dataclass class XFUNDataTrainingArguments(DataTrainingArguments): lang: Optional[str] = field(default="en") additional_langs: Optional[str] = field(default=None)
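# --- Illustrative usage sketch (not part of the original file) ---
# These dataclasses are meant to be populated by transformers' HfArgumentParser,
# as the example scripts do; the argument values below are arbitrary.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(XFUNDataTrainingArguments)
    (data_args,) = parser.parse_args_into_dataclasses(
        args=["--lang", "de", "--additional_langs", "fr+it", "--max_train_samples", "100"]
    )
    print(data_args.lang, data_args.additional_langs, data_args.max_train_samples)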
data2vec_vision-main
layoutlmft/layoutlmft/data/data_args.py
from dataclasses import dataclass from typing import Optional, Union import torch from detectron2.structures import ImageList from transformers import PreTrainedTokenizerBase from transformers.file_utils import PaddingStrategy @dataclass class DataCollatorForKeyValueExtraction: """ Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (:obj:`int`, `optional`, defaults to -100): The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None label_pad_token_id: int = -100 def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None has_image_input = "image" in features[0] has_bbox_input = "bbox" in features[0] if has_image_input: image = ImageList.from_tensors([torch.tensor(feature["image"]) for feature in features], 32) for feature in features: del feature["image"] batch = self.tokenizer.pad( features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, # Conversion to tensors will fail if we have labels as they are not of the same length yet. return_tensors="pt" if labels is None else None, ) if labels is None: return batch sequence_length = torch.tensor(batch["input_ids"]).shape[1] padding_side = self.tokenizer.padding_side if padding_side == "right": batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels] if has_bbox_input: batch["bbox"] = [bbox + [[0, 0, 0, 0]] * (sequence_length - len(bbox)) for bbox in batch["bbox"]] else: batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels] if has_bbox_input: batch["bbox"] = [[[0, 0, 0, 0]] * (sequence_length - len(bbox)) + bbox for bbox in batch["bbox"]] batch = {k: torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) else v for k, v in batch.items()} if has_image_input: batch["image"] = image return batch
data2vec_vision-main
layoutlmft/layoutlmft/data/data_collator.py
data2vec_vision-main
layoutlmft/layoutlmft/data/datasets/__init__.py
# Lint as: python3 import json import logging import os import datasets from layoutlmft.data.utils import load_image, merge_bbox, normalize_bbox, simplify_bbox from transformers import AutoTokenizer _URL = "https://github.com/doc-analysis/XFUN/releases/download/v1.0/" _LANG = ["zh", "de", "es", "fr", "en", "it", "ja", "pt"] logger = logging.getLogger(__name__) class XFUNConfig(datasets.BuilderConfig): """BuilderConfig for XFUN.""" def __init__(self, lang, additional_langs=None, **kwargs): """ Args: lang: string, language for the input text **kwargs: keyword arguments forwarded to super. """ super(XFUNConfig, self).__init__(**kwargs) self.lang = lang self.additional_langs = additional_langs class XFUN(datasets.GeneratorBasedBuilder): """XFUN dataset.""" BUILDER_CONFIGS = [XFUNConfig(name=f"xfun.{lang}", lang=lang) for lang in _LANG] tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base") def _info(self): return datasets.DatasetInfo( features=datasets.Features( { "id": datasets.Value("string"), "input_ids": datasets.Sequence(datasets.Value("int64")), "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))), "labels": datasets.Sequence( datasets.ClassLabel( names=["O", "B-QUESTION", "B-ANSWER", "B-HEADER", "I-ANSWER", "I-QUESTION", "I-HEADER"] ) ), "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"), "entities": datasets.Sequence( { "start": datasets.Value("int64"), "end": datasets.Value("int64"), "label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]), } ), "relations": datasets.Sequence( { "head": datasets.Value("int64"), "tail": datasets.Value("int64"), "start_index": datasets.Value("int64"), "end_index": datasets.Value("int64"), } ), } ), supervised_keys=None, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" urls_to_download = { "train": [f"{_URL}{self.config.lang}.train.json", f"{_URL}{self.config.lang}.train.zip"], "val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"], # "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"], } downloaded_files = dl_manager.download_and_extract(urls_to_download) train_files_for_many_langs = [downloaded_files["train"]] val_files_for_many_langs = [downloaded_files["val"]] # test_files_for_many_langs = [downloaded_files["test"]] if self.config.additional_langs: additional_langs = self.config.additional_langs.split("+") if "all" in additional_langs: additional_langs = [lang for lang in _LANG if lang != self.config.lang] for lang in additional_langs: urls_to_download = {"train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]} additional_downloaded_files = dl_manager.download_and_extract(urls_to_download) train_files_for_many_langs.append(additional_downloaded_files["train"]) logger.info(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})") logger.info(f"Evaluating on {self.config.lang}") logger.info(f"Testing on {self.config.lang}") return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs} ), # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}), ] def _generate_examples(self, filepaths): for filepath in filepaths: logger.info("Generating examples from = %s", filepath) with open(filepath[0], "r") as f: data = json.load(f) for doc in data["documents"]: doc["img"]["fpath"] = 
os.path.join(filepath[1], doc["img"]["fname"]) image, size = load_image(doc["img"]["fpath"]) document = doc["document"] tokenized_doc = {"input_ids": [], "bbox": [], "labels": []} entities = [] relations = [] id2label = {} entity_id_to_index_map = {} empty_entity = set() for line in document: if len(line["text"]) == 0: empty_entity.add(line["id"]) continue id2label[line["id"]] = line["label"] relations.extend([tuple(sorted(l)) for l in line["linking"]]) tokenized_inputs = self.tokenizer( line["text"], add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False, ) text_length = 0 ocr_length = 0 bbox = [] last_box = None for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]): if token_id == 6: bbox.append(None) continue text_length += offset[1] - offset[0] tmp_box = [] while ocr_length < text_length: ocr_word = line["words"].pop(0) ocr_length += len( self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip()) ) tmp_box.append(simplify_bbox(ocr_word["box"])) if len(tmp_box) == 0: tmp_box = last_box bbox.append(normalize_bbox(merge_bbox(tmp_box), size)) last_box = tmp_box bbox = [ [bbox[i + 1][0], bbox[i + 1][1], bbox[i + 1][0], bbox[i + 1][1]] if b is None else b for i, b in enumerate(bbox) ] if line["label"] == "other": label = ["O"] * len(bbox) else: label = [f"I-{line['label'].upper()}"] * len(bbox) label[0] = f"B-{line['label'].upper()}" tokenized_inputs.update({"bbox": bbox, "labels": label}) if label[0] != "O": entity_id_to_index_map[line["id"]] = len(entities) entities.append( { "start": len(tokenized_doc["input_ids"]), "end": len(tokenized_doc["input_ids"]) + len(tokenized_inputs["input_ids"]), "label": line["label"].upper(), } ) for i in tokenized_doc: tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i] relations = list(set(relations)) relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity] kvrelations = [] for rel in relations: pair = [id2label[rel[0]], id2label[rel[1]]] if pair == ["question", "answer"]: kvrelations.append( {"head": entity_id_to_index_map[rel[0]], "tail": entity_id_to_index_map[rel[1]]} ) elif pair == ["answer", "question"]: kvrelations.append( {"head": entity_id_to_index_map[rel[1]], "tail": entity_id_to_index_map[rel[0]]} ) else: continue def get_relation_span(rel): bound = [] for entity_index in [rel["head"], rel["tail"]]: bound.append(entities[entity_index]["start"]) bound.append(entities[entity_index]["end"]) return min(bound), max(bound) relations = sorted( [ { "head": rel["head"], "tail": rel["tail"], "start_index": get_relation_span(rel)[0], "end_index": get_relation_span(rel)[1], } for rel in kvrelations ], key=lambda x: x["head"], ) chunk_size = 512 for chunk_id, index in enumerate(range(0, len(tokenized_doc["input_ids"]), chunk_size)): item = {} for k in tokenized_doc: item[k] = tokenized_doc[k][index : index + chunk_size] entities_in_this_span = [] global_to_local_map = {} for entity_id, entity in enumerate(entities): if ( index <= entity["start"] < index + chunk_size and index <= entity["end"] < index + chunk_size ): entity["start"] = entity["start"] - index entity["end"] = entity["end"] - index global_to_local_map[entity_id] = len(entities_in_this_span) entities_in_this_span.append(entity) relations_in_this_span = [] for relation in relations: if ( index <= relation["start_index"] < index + chunk_size and index <= relation["end_index"] < index + chunk_size ): relations_in_this_span.append( { "head": 
global_to_local_map[relation["head"]], "tail": global_to_local_map[relation["tail"]], "start_index": relation["start_index"] - index, "end_index": relation["end_index"] - index, } ) item.update( { "id": f"{doc['id']}_{chunk_id}", "image": image, "entities": entities_in_this_span, "relations": relations_in_this_span, } ) yield f"{doc['id']}_{chunk_id}", item
data2vec_vision-main
layoutlmft/layoutlmft/data/datasets/xfun.py
# coding=utf-8 import json import os import datasets from layoutlmft.data.utils import load_image, normalize_bbox logger = datasets.logging.get_logger(__name__) _CITATION = """\ @article{Jaume2019FUNSDAD, title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents}, author={Guillaume Jaume and H. K. Ekenel and J. Thiran}, journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)}, year={2019}, volume={2}, pages={1-6} } """ _DESCRIPTION = """\ https://guillaumejaume.github.io/FUNSD/ """ class FunsdConfig(datasets.BuilderConfig): """BuilderConfig for FUNSD""" def __init__(self, **kwargs): """BuilderConfig for FUNSD. Args: **kwargs: keyword arguments forwarded to super. """ super(FunsdConfig, self).__init__(**kwargs) class Funsd(datasets.GeneratorBasedBuilder): """Conll2003 dataset.""" BUILDER_CONFIGS = [ FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "id": datasets.Value("string"), "tokens": datasets.Sequence(datasets.Value("string")), "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"] ) ), "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"), } ), supervised_keys=None, homepage="https://guillaumejaume.github.io/FUNSD/", citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip") return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"} ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"} ), ] def _generate_examples(self, filepath): logger.info("⏳ Generating examples from = %s", filepath) ann_dir = os.path.join(filepath, "annotations") img_dir = os.path.join(filepath, "images") for guid, file in enumerate(sorted(os.listdir(ann_dir))): tokens = [] bboxes = [] ner_tags = [] file_path = os.path.join(ann_dir, file) with open(file_path, "r", encoding="utf8") as f: data = json.load(f) image_path = os.path.join(img_dir, file) image_path = image_path.replace("json", "png") image, size = load_image(image_path) for item in data["form"]: words, label = item["words"], item["label"] words = [w for w in words if w["text"].strip() != ""] if len(words) == 0: continue if label == "other": for w in words: tokens.append(w["text"]) ner_tags.append("O") bboxes.append(normalize_bbox(w["box"], size)) else: tokens.append(words[0]["text"]) ner_tags.append("B-" + label.upper()) bboxes.append(normalize_bbox(words[0]["box"], size)) for w in words[1:]: tokens.append(w["text"]) ner_tags.append("I-" + label.upper()) bboxes.append(normalize_bbox(w["box"], size)) yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags, "image": image}
data2vec_vision-main
layoutlmft/layoutlmft/data/datasets/funsd.py
#!/usr/bin/env python # coding=utf-8 import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.xfun import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.data.data_args import XFUNDataTrainingArguments from layoutlmft.models.model_args import ModelArguments from layoutlmft.trainers import XfunSerTrainer from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.5.0") logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. 
set_seed(training_args.seed) datasets = load_dataset( os.path.abspath(layoutlmft.data.datasets.xfun.__file__), f"xfun.{data_args.lang}", additional_langs=data_args.additional_langs, keep_in_memory=True, ) if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["validation"].column_names features = datasets["validation"].features text_column_name = "input_ids" label_column_name = "labels" remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. 
Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = XfunSerTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) # Remove 
ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
data2vec_vision-main
layoutlmft/examples/run_xfun_ser.py
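The compute_metrics helper in run_xfun_ser.py drops every position whose gold label is -100 (the value given to special tokens and padding) before passing the sequences to seqeval. Below is a minimal, self-contained sketch of that filtering step; the label names and toy logits are invented for illustration and are not taken from the XFUN data.

import numpy as np

# Hypothetical label set and model output, for illustration only.
label_list = ["O", "B-QUESTION", "B-ANSWER"]
logits = np.array([[[2.0, 0.1, 0.1],    # token 0 -> "O"
                    [0.1, 3.0, 0.2],    # token 1 -> "B-QUESTION"
                    [0.3, 0.2, 2.5]]])  # token 2 -> "B-ANSWER"
labels = np.array([[-100, 1, 2]])       # the first position is a special token

predictions = np.argmax(logits, axis=2)

# Same list-comprehension pattern as compute_metrics: keep only positions
# whose gold label is not the ignore index (-100).
true_predictions = [
    [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
    for prediction, label in zip(predictions, labels)
]
true_labels = [
    [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
    for prediction, label in zip(predictions, labels)
]
print(true_predictions)  # [['B-QUESTION', 'B-ANSWER']]
print(true_labels)       # [['B-QUESTION', 'B-ANSWER']]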
#!/usr/bin/env python # coding=utf-8 import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.funsd import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.data.data_args import DataTrainingArguments from layoutlmft.models.model_args import ModelArguments from layoutlmft.trainers import FunsdTrainer as Trainer from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.5.0") logger = logging.getLogger(__name__) def main(): # See all possible arguments in layoutlmft/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. 
set_seed(training_args.seed) datasets = load_dataset(os.path.abspath(layoutlmft.data.datasets.funsd.__file__)) if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["validation"].column_names features = datasets["validation"].features text_column_name = "tokens" if "tokens" in column_names else column_names[0] label_column_name = ( f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1] ) remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], padding=padding, truncation=True, return_overflowing_tokens=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). 
is_split_into_words=True, ) labels = [] bboxes = [] images = [] for batch_index in range(len(tokenized_inputs["input_ids"])): word_ids = tokenized_inputs.word_ids(batch_index=batch_index) org_batch_index = tokenized_inputs["overflow_to_sample_mapping"][batch_index] label = examples[label_column_name][org_batch_index] bbox = examples["bboxes"][org_batch_index] image = examples["image"][org_batch_index] previous_word_idx = None label_ids = [] bbox_inputs = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) bbox_inputs.append([0, 0, 0, 0]) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) bbox_inputs.append(bbox[word_idx]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100) bbox_inputs.append(bbox[word_idx]) previous_word_idx = word_idx labels.append(label_ids) bboxes.append(bbox_inputs) images.append(image) tokenized_inputs["labels"] = labels tokenized_inputs["bbox"] = bboxes tokenized_inputs["image"] = images return tokenized_inputs if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) train_dataset = train_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) eval_dataset = eval_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) test_dataset = test_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, 
value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
data2vec_vision-main
layoutlmft/examples/run_funsd.py
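tokenize_and_align_labels in run_funsd.py leans on the fast tokenizer's word_ids() mapping: special tokens (word id None) get label -100, the first sub-token of each word gets that word's label, and the remaining sub-tokens get -100 unless label_all_tokens is set. The sketch below replays that rule on a hand-written word_ids list so it runs without downloading a tokenizer; the labels and the mapping are invented for illustration.

# Hypothetical word-level labels and a hand-written word_ids mapping,
# standing in for tokenizer(...).word_ids(batch_index=...).
label_to_id = {"O": 0, "B-HEADER": 1, "I-HEADER": 2}
word_labels = ["B-HEADER", "I-HEADER", "O"]          # one label per word
word_ids = [None, 0, 0, 1, 2, 2, None]               # [CLS] w0 w0 w1 w2 w2 [SEP]
label_all_tokens = False

label_ids, previous_word_idx = [], None
for word_idx in word_ids:
    if word_idx is None:                 # special token
        label_ids.append(-100)
    elif word_idx != previous_word_idx:  # first sub-token of a word
        label_ids.append(label_to_id[word_labels[word_idx]])
    else:                                # continuation sub-token
        label_ids.append(label_to_id[word_labels[word_idx]] if label_all_tokens else -100)
    previous_word_idx = word_idx

print(label_ids)  # [-100, 1, -100, 2, 0, -100, -100]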
#!/usr/bin/env python # coding=utf-8 import logging import os import sys import numpy as np from datasets import ClassLabel, load_dataset import layoutlmft.data.datasets.xfun import transformers from layoutlmft import AutoModelForRelationExtraction from layoutlmft.data.data_args import XFUNDataTrainingArguments from layoutlmft.data.data_collator import DataCollatorForKeyValueExtraction from layoutlmft.evaluation import re_score from layoutlmft.models.model_args import ModelArguments from layoutlmft.trainers import XfunReTrainer from transformers import ( AutoConfig, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process logger = logging.getLogger(__name__) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. 
set_seed(training_args.seed) datasets = load_dataset( os.path.abspath(layoutlmft.data.datasets.xfun.__file__), f"xfun.{data_args.lang}", additional_langs=data_args.additional_langs, keep_in_memory=True, ) if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["validation"].column_names features = datasets["validation"].features text_column_name = "input_ids" label_column_name = "labels" remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForRelationExtraction.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. 
Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) def compute_metrics(p): pred_relations, gt_relations = p score = re_score(pred_relations, gt_relations, mode="boundaries") return score # Initialize our Trainer trainer = XfunReTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
data2vec_vision-main
layoutlmft/examples/run_xfun_re.py
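run_xfun_re.py scores predictions with re_score(..., mode="boundaries") from layoutlmft.evaluation, whose source is not included in this dump. The toy function below only illustrates the general idea of set-based precision/recall/F1 over (head, tail) span pairs; it is an assumption about what boundary-level matching means, not the library's implementation.

# Toy illustration of boundary-level relation scoring. This is NOT
# layoutlmft's re_score, just a sketch of the idea under the assumption
# that only the (head_span, tail_span) pair is compared.
def toy_relation_score(pred_relations, gt_relations):
    pred = {(r["head"], r["tail"]) for r in pred_relations}
    gold = {(r["head"], r["tail"]) for r in gt_relations}
    tp = len(pred & gold)
    precision = tp / len(pred) if pred else 0.0
    recall = tp / len(gold) if gold else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"precision": precision, "recall": recall, "f1": f1}

pred = [{"head": (0, 3), "tail": (4, 7)}, {"head": (8, 10), "tail": (11, 12)}]
gold = [{"head": (0, 3), "tail": (4, 7)}]
print(toy_relation_score(pred, gold))  # {'precision': 0.5, 'recall': 1.0, 'f1': 0.666...}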
""" Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py To create the package for pypi. 1. Change the version in __init__.py and setup.py. 2. Commit these changes with the message: "Release: VERSION" 3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " Push the tag to git: git push --tags origin master 4. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory. (this will build a wheel for the python version you use to build it - make sure you use python 3.x). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp. 5. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r pypitest (pypi suggest using twine as other methods upload files via plaintext.) Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi allennlp 6. Upload the final version to actual pypi: twine upload dist/* -r pypi 7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. """ from setuptools import find_packages, setup setup( name="pytorch_pretrained_bert", version="0.4.0", author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors", author_email="[email protected]", description="PyTorch version of Google AI BERT model with script to load Google pre-trained models", long_description="pytorch", long_description_content_type="text/markdown", keywords='BERT NLP deep learning google', license='Apache', url="https://github.com/huggingface/pytorch-pretrained-BERT", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=['numpy', 'boto3', 'requests', 'tqdm'], entry_points={ 'console_scripts': [ "pytorch_pretrained_bert=pytorch_pretrained_bert.__main__:main" ] }, python_requires='>=3.5.0', tests_require=['pytest'], classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], )
data2vec_vision-main
unilm-v1/src/setup.py
import torch from torch.nn import DataParallel from torch.cuda._utils import _get_device_index from torch.nn.parallel._functions import Scatter from itertools import chain def scatter_imbalance(inputs, target_gpus, dim=0): r""" Slices tensors into approximately equal chunks and distributes them across given GPUs. Duplicates references to objects that are not tensors. """ def scatter_map(obj): if isinstance(obj, torch.Tensor): if (len(target_gpus) == 4) and (obj.size(dim) == 22): return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj) if (len(target_gpus) == 4) and (obj.size(dim) == 60): return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj) elif (len(target_gpus) == 4) and (obj.size(dim) == 144): return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 46): return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 62): return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 94): return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 110): return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 118): return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 126): return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 134): return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 142): return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj) elif (len(target_gpus) == 16) and (obj.size(dim) == 222): return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14), dim, obj) return Scatter.apply(target_gpus, None, dim, obj) if isinstance(obj, tuple) and len(obj) > 0: return list(zip(*map(scatter_map, obj))) if isinstance(obj, list) and len(obj) > 0: return list(map(list, zip(*map(scatter_map, obj)))) if isinstance(obj, dict) and len(obj) > 0: return list(map(type(obj), zip(*map(scatter_map, obj.items())))) return [obj for targets in target_gpus] # After scatter_map is called, a scatter_map cell will exist. This cell # has a reference to the actual function scatter_map, which has references # to a closure that has a reference to the scatter_map cell (because the # fn is recursive). 
To avoid this reference cycle, we set the function to # None, clearing the cell try: return scatter_map(inputs) finally: scatter_map = None def scatter_kwargs_imbalance(inputs, kwargs, target_gpus, dim=0): r"""Scatter with support for kwargs dictionary""" inputs = scatter_imbalance(inputs, target_gpus, dim) if inputs else [] kwargs = scatter_imbalance(kwargs, target_gpus, dim) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) return inputs, kwargs class DataParallelImbalance(DataParallel): def __init__(self, module, device_ids=None, output_device=None, dim=0): super(DataParallelImbalance, self).__init__( module, device_ids, output_device, dim) if not torch.cuda.is_available(): self.module = module self.device_ids = [] return if device_ids is None: device_ids = list(range(torch.cuda.device_count())) if output_device is None: output_device = device_ids[0] if not all(t.is_cuda and t.device.index == device_ids[0] for t in chain(module.parameters(), module.buffers())): raise RuntimeError("module must have its parameters and buffers " "on device %d (device_ids[0])" % device_ids[0]) self.dim = dim self.module = module self.device_ids = list( map(lambda x: _get_device_index(x, True), device_ids)) self.output_device = _get_device_index(output_device, True) if len(self.device_ids) == 1: self.module.cuda(device_ids[0]) def forward(self, *inputs, **kwargs): if not self.device_ids: return self.module(*inputs, **kwargs) inputs, kwargs = self.scatter_imbalance( inputs, kwargs, self.device_ids) if len(self.device_ids) == 1: return self.module(*inputs[0], **kwargs[0]) replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) outputs = self.parallel_apply(replicas, inputs, kwargs) return self.gather(outputs, self.output_device) def scatter_imbalance(self, inputs, kwargs, device_ids): return scatter_kwargs_imbalance(inputs, kwargs, device_ids, dim=self.dim)
data2vec_vision-main
unilm-v1/src/nn/data_parallel.py
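scatter_imbalance hands device 0 a smaller slice of the batch than the other devices, since device 0 also gathers outputs and holds extra state. Scatter.apply needs real GPUs, so the sketch below only shows the chunking arithmetic with torch.split on CPU, mirroring the (12, 16, 16, 16) split that the code applies to a batch of 60 on 4 devices.

import torch

# Uneven chunking sketch: device 0 would receive the smaller first chunk.
batch = torch.randn(60, 8)              # 60 examples, 8 features (toy sizes)
chunks = torch.split(batch, (12, 16, 16, 16), dim=0)
print([c.size(0) for c in chunks])      # [12, 16, 16, 16]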
data2vec_vision-main
unilm-v1/src/nn/__init__.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ from collections import defaultdict from torch._six import container_abcs from copy import deepcopy from itertools import chain def warmup_cosine(x, warmup=0.002): if x < warmup: return x/warmup return 0.5 * (1.0 + torch.cos(math.pi * x)) def warmup_constant(x, warmup=0.002): if x < warmup: return x/warmup return 1.0 def warmup_linear(x, warmup=0.002): if x < warmup: return x/warmup return max((x-1.)/(warmup-1.), 0) SCHEDULES = { 'warmup_cosine': warmup_cosine, 'warmup_constant': warmup_constant, 'warmup_linear': warmup_linear, } class BertAdam(Optimizer): """Implements BERT version of Adam algorithm with weight decay fix. Params: lr: learning rate warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 t_total: total number of training steps for the learning rate schedule, -1 means constant learning rate. Default: -1 schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' b1: Adams b1. Default: 0.9 b2: Adams b2. Default: 0.999 e: Adams epsilon. Default: 1e-6 weight_decay: Weight decay. Default: 0.01 max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0 """ def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0): if lr is not required and lr < 0.0: raise ValueError( "Invalid learning rate: {} - should be >= 0.0".format(lr)) if schedule not in SCHEDULES: raise ValueError("Invalid schedule parameter: {}".format(schedule)) if not 0.0 <= warmup < 1.0 and not warmup == -1: raise ValueError( "Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) if not 0.0 <= b1 < 1.0: raise ValueError( "Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) if not 0.0 <= b2 < 1.0: raise ValueError( "Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) if not e >= 0.0: raise ValueError( "Invalid epsilon value: {} - should be >= 0.0".format(e)) defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(BertAdam, self).__init__(params, defaults) def get_lr(self): lr = [] for group in self.param_groups: for p in group['params']: state = self.state[p] if len(state) == 0: return [0] if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct( state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] lr.append(lr_scheduled) return lr def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError( 'Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['next_m'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['next_v'] = torch.zeros_like(p.data) next_m, next_v = state['next_m'], state['next_v'] beta1, beta2 = group['b1'], group['b2'] # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm']) # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group['e']) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if group['weight_decay'] > 0.0: update += group['weight_decay'] * p.data if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct( state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] update_with_lr = lr_scheduled * update p.data.add_(-update_with_lr) state['step'] += 1 # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 # No bias correction # bias_correction1 = 1 - beta1 ** state['step'] # bias_correction2 = 1 - beta2 ** state['step'] return loss class BertAdamFineTune(BertAdam): def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0): self.init_param_group = [] super(BertAdamFineTune, self).__init__(params, lr, warmup, t_total, schedule, b1, b2, e, weight_decay, max_grad_norm) def save_init_param_group(self, param_groups, name_groups, missing_keys): self.init_param_group = [] for group, name in zip(param_groups, name_groups): if group['weight_decay'] > 0.0: init_p_list = [] for p, n in zip(group['params'], name): init_p = p.data.clone().detach() if any(mk in n for mk in missing_keys): print("[no finetuning weight decay]", n) # should use the original weight decay init_p.zero_() init_p_list.append(init_p) self.init_param_group.append(init_p_list) else: # placeholder self.init_param_group.append([]) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for i_group, group in enumerate(self.param_groups): for i_p, p in enumerate(group['params']): if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError( 'Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['next_m'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['next_v'] = torch.zeros_like(p.data) next_m, next_v = state['next_m'], state['next_v'] beta1, beta2 = group['b1'], group['b2'] # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm']) # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group['e']) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if group['weight_decay'] > 0.0: if self.init_param_group: update += group['weight_decay'] * \ (2.0 * p.data - self.init_param_group[i_group][i_p]) else: update += group['weight_decay'] * p.data if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct( state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] update_with_lr = lr_scheduled * update p.data.add_(-update_with_lr) state['step'] += 1 # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 # No bias correction # bias_correction1 = 1 - beta1 ** state['step'] # bias_correction2 = 1 - beta2 ** state['step'] return loss def load_state_dict_subset_finetune(self, state_dict, num_load_group): r"""Loads the optimizer state. Arguments: state_dict (dict): optimizer state. Should be an object returned from a call to :meth:`state_dict`. """ # deepcopy, to be consistent with module API state_dict = deepcopy(state_dict) # Validate the state_dict groups = self.param_groups saved_groups = state_dict['param_groups'] if len(groups) < num_load_group or len(saved_groups) < num_load_group: raise ValueError("loaded state dict has a different number of " "parameter groups") param_lens = (len(g['params']) for g in groups[:num_load_group]) saved_lens = (len(g['params']) for g in saved_groups[:num_load_group]) if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): raise ValueError("loaded state dict contains a parameter group " "that doesn't match the size of optimizer's group") # Update the state id_map = {old_id: p for old_id, p in zip(chain(*(g['params'] for g in saved_groups[:num_load_group])), chain(*(g['params'] for g in groups[:num_load_group])))} def cast(param, value): r"""Make a deep copy of value, casting all tensors to device of param.""" if isinstance(value, torch.Tensor): # Floating-point types are a bit special here. They are the only ones # that are assumed to always match the type of params. 
if param.is_floating_point(): value = value.to(param.dtype) value = value.to(param.device) return value elif isinstance(value, dict): return {k: cast(param, v) for k, v in value.items()} elif isinstance(value, container_abcs.Iterable): return type(value)(cast(param, v) for v in value) else: return value # Copy state assigned to params (and cast tensors to appropriate types). # State that is not assigned to params is copied as is (needed for # backward compatibility). state = defaultdict(dict) for k, v in state_dict['state'].items(): if k in id_map: param = id_map[k] state[param] = cast(param, v) else: state[k] = v # handle additional params for k, v in self.state: if k not in state: state[k] = v # do not change groups: {'weight_decay': 0.01, 'lr': 9.995e-06, 'schedule': 'warmup_linear', 'warmup': 0.1, 't_total': 400000, 'b1': 0.9, 'b2': 0.999, 'e': 1e-06, 'max_grad_norm': 1.0, 'params': [...]} # # Update parameter groups, setting their 'params' value # def update_group(group, new_group): # new_group['params'] = group['params'] # return new_group # param_groups = [ # update_group(g, ng) for g, ng in zip(groups[:num_load_group], saved_groups[:num_load_group])] # # handle additional params # param_groups.extend(groups[num_load_group:]) self.__setstate__({'state': state, 'param_groups': groups}) def find_state_dict_subset_finetune(org_state_dict, org_name_list, no_decay, param_optimizer): # only use the bert encoder and embeddings want_name_set = set() for n in org_name_list: if ('bert.encoder' in n) or ('bert.embeddings' in n): want_name_set.add(n) # original: name to pid, pid to name org_grouped_names = [[n for n in org_name_list if not any(nd in n for nd in no_decay)], [n for n in org_name_list if any(nd in n for nd in no_decay)]] org_n2id, org_id2n = {}, {} for ng, pg in zip(org_grouped_names, org_state_dict['param_groups']): for n, pid in zip(ng, pg['params']): org_n2id[n] = pid org_id2n[pid] = n # group by: whether pretrained; whether weight decay g_np_list = [ [(n, p) for n, p in param_optimizer if n in want_name_set and not any( nd in n for nd in no_decay)], [(n, p) for n, p in param_optimizer if n in want_name_set and any( nd in n for nd in no_decay)], [(n, p) for n, p in param_optimizer if n not in want_name_set and not any( nd in n for nd in no_decay)], [(n, p) for n, p in param_optimizer if n not in want_name_set and any( nd in n for nd in no_decay)], ] optimizer_grouped_parameters = [ {'params': [p for n, p in g_np_list[0]], 'weight_decay': 0.01}, {'params': [p for n, p in g_np_list[1]], 'weight_decay': 0.0}, {'params': [p for n, p in g_np_list[2]], 'weight_decay': 0.01}, {'params': [p for n, p in g_np_list[3]], 'weight_decay': 0.0} ] new_state_dict = {} # regroup the original state_dict new_state_dict['state'] = {pid: v for pid, v in org_state_dict['state'].items( ) if pid not in org_id2n or org_id2n[pid] in want_name_set} # reset step count to 0 for pid, st in new_state_dict['state'].items(): st['step'] = 0 def _filter_group(group, g_np_list, i, org_n2id): packed = {k: v for k, v in group.items() if k != 'params'} packed['params'] = [pid for pid in group['params'] if pid in org_id2n and org_id2n[pid] in want_name_set] assert len(g_np_list[i]) == len(packed['params']) # keep them the same order packed['params'] = [org_n2id[n] for n, p in g_np_list[i]] return packed new_state_dict['param_groups'] = [_filter_group( g, g_np_list, i, org_n2id) for i, g in enumerate(org_state_dict['param_groups'])] return new_state_dict, optimizer_grouped_parameters
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/optimization.py
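BertAdam multiplies the base learning rate by a schedule value computed from the fraction of training completed. The sketch below restates warmup_linear so it runs stand-alone and prints the multiplier at a few points; the warmup fraction of 0.1 and the base rate of 3e-5 are arbitrary illustration values.

# Re-statement of warmup_linear from the module above so the sketch runs
# stand-alone; x is the fraction of training completed (step / t_total).
def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x / warmup
    return max((x - 1.) / (warmup - 1.), 0)

base_lr = 3e-5                          # arbitrary illustration value
for x in (0.0, 0.05, 0.1, 0.5, 1.0):
    print(x, base_lr * warmup_linear(x, warmup=0.1))
# 0.0  -> 0.0       (start of warmup)
# 0.05 -> 1.5e-05   (halfway through warmup)
# 0.1  -> 3e-05     (peak, end of warmup)
# 0.5  -> ~1.67e-05 (linear decay toward t_total)
# 1.0  -> 0.0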
__version__ = "0.4.0"
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .modeling import (BertConfig, BertModel, BertForPreTraining, BertForMaskedLM,
                       BertForNextSentencePrediction, BertForSequenceClassification,
                       BertForMultipleChoice, BertForTokenClassification,
                       BertForQuestionAnswering, BertForPreTrainingLossMask,
                       BertPreTrainingPairRel, BertPreTrainingPairTransform)
from .optimization import BertAdam, BertAdamFineTune
from .optimization_fp16 import FP16_Optimizer_State
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/__init__.py
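Because the package __init__ re-exports the main classes, downstream code imports them from the top level. A short usage sketch follows; it assumes this repo's pytorch_pretrained_bert package is installed and that network access is available so from_pretrained can fetch the named vocabulary.

# Assumes this repo's pytorch_pretrained_bert package is installed and that
# the vocabulary for the named model can be downloaded (network access needed).
from pytorch_pretrained_bert import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-cased", do_lower_case=False)
tokens = tokenizer.tokenize("Key-value extraction with UniLM")
print(tokens)
print(tokenizer.convert_tokens_to_ids(tokens))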
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import os import logging from .file_utils import cached_path logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, } VOCAB_NAME = 'vocab.txt' def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" # mapping unused tokens to special tokens extra_map = {} extra_map['[unused1]'] = '[X_SEP]' for i in range(10): extra_map['[unused{}]'.format(i+2)] = '[SEP_{}]'.format(i) extra_map['[unused12]'] = '[S2S_SEP]' extra_map['[unused13]'] = '[S2S_CLS]' extra_map['[unused14]'] = '[L2R_SEP]' extra_map['[unused15]'] = '[L2R_CLS]' extra_map['[unused16]'] = '[R2L_SEP]' extra_map['[unused17]'] = '[R2L_CLS]' extra_map['[unused18]'] = '[S2S_SOS]' vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r", encoding="utf-8") as reader: while True: token = reader.readline() if not token: break token = token.strip() if token in extra_map: token = extra_map[token] vocab[token] = index index += 1 return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a peice of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None, never_split=("[UNK]", "[SEP]", "[X_SEP]", "[PAD]", "[CLS]", "[MASK]")): if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: raise ValueError( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format( len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens @classmethod def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name] else: vocab_file = pretrained_model_name if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except FileNotFoundError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer class WhitespaceTokenizer(object): def tokenize(self, text): return whitespace_tokenize(text) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case self.never_split = never_split def tokenize(self, text): """Tokenizes a piece of text.""" text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. 
This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case and token not in self.never_split: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" if text in self.never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/tokenization.py
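WordpieceTokenizer.tokenize uses a greedy longest-match-first scan, as its docstring describes with the "unaffable" example. The stand-alone sketch below reproduces that loop against a three-entry toy vocabulary (not the real BERT vocabulary).

# Stand-alone version of the greedy longest-match-first loop in
# WordpieceTokenizer.tokenize, with a toy vocabulary.
vocab = {"un", "##aff", "##able"}

def toy_wordpiece(token, unk_token="[UNK]"):
    chars = list(token)
    start, sub_tokens = 0, []
    while start < len(chars):
        end, cur_substr = len(chars), None
        while start < end:
            substr = "".join(chars[start:end])
            if start > 0:
                substr = "##" + substr      # continuation pieces are marked with ##
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:              # no piece matched: whole word is unknown
            return [unk_token]
        sub_tokens.append(cur_substr)
        start = end
    return sub_tokens

print(toy_wordpiece("unaffable"))  # ['un', '##aff', '##able']
print(toy_wordpiece("zzz"))        # ['[UNK]']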
# coding=utf-8 """PyTorch optimization for BERT model.""" from apex.optimizers import FP16_Optimizer class FP16_Optimizer_State(FP16_Optimizer): def __init__(self, init_optimizer, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True): super(FP16_Optimizer_State, self).__init__(init_optimizer, static_loss_scale, dynamic_loss_scale, dynamic_loss_args, verbose) def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict['optimizer_state_dict'] = self.optimizer.state_dict() state_dict['fp32_groups_flat'] = self.fp32_groups_flat return state_dict def load_state_dict(self, state_dict): """ Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ # I think it should actually be ok to reload the optimizer before the model. self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] self.cur_scale = state_dict['cur_scale'] self.cur_iter = state_dict['cur_iter'] if state_dict['dynamic_loss_scale']: self.last_overflow_iter = state_dict['last_overflow_iter'] self.scale_factor = state_dict['scale_factor'] self.scale_window = state_dict['scale_window'] self.optimizer.load_state_dict(state_dict['optimizer_state_dict']) # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 2. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']): current.data.copy_(saved.data)
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/optimization_fp16.py
# coding=utf-8 from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss class LabelSmoothingLoss(_Loss): """ With label smoothing, KL-divergence between q_{smoothed ground truth prob.}(w) and p_{prob. computed by model}(w) is minimized. """ def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'): assert 0.0 < label_smoothing <= 1.0 self.ignore_index = ignore_index super(LabelSmoothingLoss, self).__init__( size_average=size_average, reduce=reduce, reduction=reduction) assert label_smoothing > 0 assert tgt_vocab_size > 0 smoothing_value = label_smoothing / (tgt_vocab_size - 2) one_hot = torch.full((tgt_vocab_size,), smoothing_value) one_hot[self.ignore_index] = 0 self.register_buffer('one_hot', one_hot.unsqueeze(0)) self.confidence = 1.0 - label_smoothing self.tgt_vocab_size = tgt_vocab_size def forward(self, output, target): """ output (FloatTensor): batch_size * num_pos * n_classes target (LongTensor): batch_size * num_pos """ assert self.tgt_vocab_size == output.size(2) batch_size, num_pos = target.size(0), target.size(1) output = output.view(-1, self.tgt_vocab_size) target = target.view(-1) model_prob = self.one_hot.repeat(target.size(0), 1) model_prob.scatter_(1, target.unsqueeze(1), self.confidence) model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0) return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/loss.py
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import os import logging import shutil import tempfile import json from urllib.parse import urlparse from pathlib import Path from typing import Optional, Tuple, Union, IO, Callable, Set from hashlib import sha256 from functools import wraps from tqdm import tqdm import boto3 from botocore.exceptions import ClientError import requests logger = logging.getLogger(__name__) # pylint: disable=invalid-name PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', Path.home() / '.pytorch_pretrained_bert')) def url_to_filename(url: str, etag: str = None) -> str: """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. """ url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]: """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise FileNotFoundError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise FileNotFoundError("file {} not found".format(meta_path)) with open(meta_path) as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': # File, but it doesn't exist. raise FileNotFoundError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def split_s3_path(url: str) -> Tuple[str, str]: """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func: Callable): """ Wrapper function for s3 requests in order to create more helpful error messages. 
""" @wraps(func) def wrapper(url: str, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise FileNotFoundError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url: str) -> Optional[str]: """Check ETag on S3 object.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url: str, temp_file: IO) -> None: """Pull a file directly from S3.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def http_get(url: str, temp_file: IO) -> None: req = requests.get(url, stream=True) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str: """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: response = requests.head(url, allow_redirects=True) if response.status_code != 200: raise IOError("HEAD request failed for url {} with status code {}" .format(url, response.status_code)) etag = response.headers.get("ETag") filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: json.dump(meta, meta_file) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename: str) -> Set[str]: ''' Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. ''' collection = set() with open(filename, 'r', encoding='utf-8') as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path: str, dot=True, lower: bool = True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/file_utils.py
# coding=utf-8 """PyTorch BERT model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from scipy.stats import truncnorm import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss import torch.nn.functional as F from .file_utils import cached_path from .loss import LabelSmoothingLoss logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", } CONFIG_NAME = 'bert_config.json' WEIGHTS_NAME = 'pytorch_model.bin' def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, relax_projection=0, new_pos_ids=False, initializer_range=0.02, task_idx=None, fp32_embedding=False, ffn_type=0, label_smoothing=None, num_qkv=0, seg_emb=False): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" if isinstance(vocab_size_or_config_json_file, str): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.relax_projection = relax_projection self.new_pos_ids = new_pos_ids self.initializer_range = initializer_range self.task_idx = task_idx self.fp32_embedding = fp32_embedding self.ffn_type = ffn_type self.label_smoothing = label_smoothing self.num_qkv = num_qkv self.seg_emb = seg_emb else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" try: from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm except ImportError: print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-5): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding( config.vocab_size, config.hidden_size) self.token_type_embeddings = nn.Embedding( config.type_vocab_size, config.hidden_size) if hasattr(config, 'fp32_embedding'): self.fp32_embedding = config.fp32_embedding else: self.fp32_embedding = False if hasattr(config, 'new_pos_ids') and config.new_pos_ids: self.num_pos_emb = 4 else: self.num_pos_emb = 1 self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size*self.num_pos_emb) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None): seq_length = input_ids.size(1) if position_ids is None: position_ids = torch.arange( seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) if self.num_pos_emb > 1: num_batch = position_embeddings.size(0) num_pos = position_embeddings.size(1) position_embeddings = position_embeddings.view( num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :] embeddings = words_embeddings + position_embeddings + token_type_embeddings if self.fp32_embedding: embeddings = embeddings.half() embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int( config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size if hasattr(config, 'num_qkv') and (config.num_qkv > 1): self.num_qkv = config.num_qkv else: self.num_qkv = 1 self.query = nn.Linear( config.hidden_size, self.all_head_size*self.num_qkv) self.key = nn.Linear(config.hidden_size, self.all_head_size*self.num_qkv) self.value = nn.Linear( config.hidden_size, self.all_head_size*self.num_qkv) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.uni_debug_flag = True if os.getenv( 'UNI_DEBUG_FLAG', '') else False if self.uni_debug_flag: self.register_buffer('debug_attention_probs', torch.zeros((512, 512))) if hasattr(config, 'seg_emb') and config.seg_emb: self.b_q_s = nn.Parameter(torch.zeros( 1, self.num_attention_heads, 1, self.attention_head_size)) self.seg_emb = nn.Embedding( config.type_vocab_size, self.all_head_size) else: self.b_q_s = None self.seg_emb = None def transpose_for_scores(self, x, mask_qkv=None): if self.num_qkv > 1: sz = x.size()[:-1] + (self.num_qkv, self.num_attention_heads, self.all_head_size) # (batch, pos, num_qkv, head, head_hid) x = x.view(*sz) if mask_qkv is None: x = x[:, :, 0, :, :] elif isinstance(mask_qkv, int): x = x[:, :, mask_qkv, :, :] else: # mask_qkv: (batch, pos) if mask_qkv.size(1) > sz[1]: 
mask_qkv = mask_qkv[:, :sz[1]] # -> x: (batch, pos, head, head_hid) x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand( sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2) else: sz = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) # (batch, pos, head, head_hid) x = x.view(*sz) # (batch, head, pos, head_hid) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask, history_states=None, mask_qkv=None, seg_ids=None): if history_states is None: mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) else: x_states = torch.cat((history_states, hidden_states), dim=1) mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(x_states) mixed_value_layer = self.value(x_states) query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv) key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv) value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch, head, pos, pos) attention_scores = torch.matmul( query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2)) if self.seg_emb is not None: seg_rep = self.seg_emb(seg_ids) # (batch, pos, head, head_hid) seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size( 1), self.num_attention_heads, self.attention_head_size) qs = torch.einsum('bnih,bjnh->bnij', query_layer+self.b_q_s, seg_rep) attention_scores = attention_scores + qs # attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if self.uni_debug_flag: _pos = attention_probs.size(-1) self.debug_attention_probs[:_pos, :_pos].copy_( attention_probs[0].mean(0).view(_pos, _pos)) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[ :-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask, history_states=None, mask_qkv=None, seg_ids=None): self_output = self.self( input_tensor, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids) attention_output = self.output(self_output, input_tensor) return attention_output class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TransformerFFN(nn.Module): def __init__(self, config): super(TransformerFFN, self).__init__() self.ffn_type = config.ffn_type assert self.ffn_type in (1, 2) if self.ffn_type in (1, 2): self.wx0 = nn.Linear(config.hidden_size, config.hidden_size) if self.ffn_type in (2,): self.wx1 = nn.Linear(config.hidden_size, config.hidden_size) if self.ffn_type in (1, 2): self.output = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, x): if self.ffn_type in (1, 2): x0 = self.wx0(x) if self.ffn_type == 1: x1 = x elif self.ffn_type == 2: x1 = self.wx1(x) out = self.output(x0 * x1) out = self.dropout(out) out = self.LayerNorm(out + x) return out class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.ffn_type = config.ffn_type if self.ffn_type: self.ffn = TransformerFFN(config) else: self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states, attention_mask, history_states=None, mask_qkv=None, seg_ids=None): attention_output = self.attention( hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids) if 
self.ffn_type: layer_output = self.ffn(attention_output) else: intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() layer = BertLayer(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, seg_ids=None): # history embedding and encoded layer must be simultanously given assert (prev_embedding is None) == (prev_encoded_layers is None) all_encoder_layers = [] if (prev_embedding is not None) and (prev_encoded_layers is not None): history_states = prev_embedding for i, layer_module in enumerate(self.layer): hidden_states = layer_module( hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if prev_encoded_layers is not None: history_states = prev_encoded_layers[i] else: for layer_module in self.layer: hidden_states = layer_module( hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if not output_all_encoded_layers: all_encoder_layers.append(hidden_states) return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.transform_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act hid_size = config.hidden_size if hasattr(config, 'relax_projection') and (config.relax_projection > 1): hid_size *= config.relax_projection self.dense = nn.Linear(config.hidden_size, hid_size) self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros( bert_model_embedding_weights.size(0))) if hasattr(config, 'relax_projection') and (config.relax_projection > 1): self.relax_projection = config.relax_projection else: self.relax_projection = 0 self.fp32_embedding = config.fp32_embedding def convert_to_type(tensor): if self.fp32_embedding: return tensor.half() else: return tensor self.type_converter = convert_to_type self.converted = False def forward(self, hidden_states, task_idx=None): if not self.converted: self.converted = True if self.fp32_embedding: self.transform.half() hidden_states = self.transform(self.type_converter(hidden_states)) if self.relax_projection > 1: num_batch = hidden_states.size(0) num_pos = hidden_states.size(1) # (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid) hidden_states = hidden_states.view( num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :] if self.fp32_embedding: hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter( self.decoder.weight), self.type_converter(self.bias)) else: hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead( config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights, num_labels=2): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead( config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, num_labels) def forward(self, sequence_output, pooled_output, task_idx=None): prediction_scores = self.predictions(sequence_output, task_idx) if pooled_output is None: seq_relationship_score = None else: seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class PreTrainedBertModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(PreTrainedBertModel, self).__init__() if not isinstance(config, BertConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `BertConfig`. " "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) self.config = config def init_bert_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-base-multilingual` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name] else: archive_file = pretrained_model_name # redirect to the cache, if necessary try: resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir) except FileNotFoundError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format( archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file): serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir # Load config if ('config_path' in kwargs) and kwargs['config_path']: config_file = kwargs['config_path'] else: config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) # define new type_vocab_size (there might be different numbers of segment ids) if 'type_vocab_size' in kwargs: config.type_vocab_size = kwargs['type_vocab_size'] # define new relax_projection if ('relax_projection' in kwargs) and kwargs['relax_projection']: config.relax_projection = kwargs['relax_projection'] # new position embedding if ('new_pos_ids' in kwargs) and kwargs['new_pos_ids']: config.new_pos_ids = kwargs['new_pos_ids'] # define new relax_projection if ('task_idx' in kwargs) and kwargs['task_idx']: config.task_idx = kwargs['task_idx'] # define new max position embedding for length expansion if ('max_position_embeddings' in kwargs) and kwargs['max_position_embeddings']: config.max_position_embeddings = kwargs['max_position_embeddings'] # use fp32 for embeddings if ('fp32_embedding' in kwargs) and kwargs['fp32_embedding']: 
config.fp32_embedding = kwargs['fp32_embedding'] # type of FFN in transformer blocks if ('ffn_type' in kwargs) and kwargs['ffn_type']: config.ffn_type = kwargs['ffn_type'] # label smoothing if ('label_smoothing' in kwargs) and kwargs['label_smoothing']: config.label_smoothing = kwargs['label_smoothing'] # dropout if ('hidden_dropout_prob' in kwargs) and kwargs['hidden_dropout_prob']: config.hidden_dropout_prob = kwargs['hidden_dropout_prob'] if ('attention_probs_dropout_prob' in kwargs) and kwargs['attention_probs_dropout_prob']: config.attention_probs_dropout_prob = kwargs['attention_probs_dropout_prob'] # different QKV if ('num_qkv' in kwargs) and kwargs['num_qkv']: config.num_qkv = kwargs['num_qkv'] # segment embedding for self-attention if ('seg_emb' in kwargs) and kwargs['seg_emb']: config.seg_emb = kwargs['seg_emb'] # initialize word embeddings _word_emb_map = None if ('word_emb_map' in kwargs) and kwargs['word_emb_map']: _word_emb_map = kwargs['word_emb_map'] logger.info("Model config {}".format(config)) # clean the arguments in kwargs for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx', 'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing', 'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb', 'word_emb_map'): if arg_clean in kwargs: del kwargs[arg_clean] # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # initialize new segment embeddings _k = 'bert.embeddings.token_type_embeddings.weight' if (_k in state_dict) and (config.type_vocab_size != state_dict[_k].shape[0]): logger.info("config.type_vocab_size != state_dict[bert.embeddings.token_type_embeddings.weight] ({0} != {1})".format( config.type_vocab_size, state_dict[_k].shape[0])) if config.type_vocab_size > state_dict[_k].shape[0]: # state_dict[_k].data = state_dict[_k].data.resize_(config.type_vocab_size, state_dict[_k].shape[1]) state_dict[_k].resize_( config.type_vocab_size, state_dict[_k].shape[1]) # L2R if config.type_vocab_size >= 3: state_dict[_k].data[2, :].copy_(state_dict[_k].data[0, :]) # R2L if config.type_vocab_size >= 4: state_dict[_k].data[3, :].copy_(state_dict[_k].data[0, :]) # S2S if config.type_vocab_size >= 6: state_dict[_k].data[4, :].copy_(state_dict[_k].data[0, :]) state_dict[_k].data[5, :].copy_(state_dict[_k].data[1, :]) if config.type_vocab_size >= 7: state_dict[_k].data[6, :].copy_(state_dict[_k].data[1, :]) elif config.type_vocab_size < state_dict[_k].shape[0]: state_dict[_k].data = state_dict[_k].data[:config.type_vocab_size, :] _k = 'bert.embeddings.position_embeddings.weight' n_config_pos_emb = 4 if config.new_pos_ids else 1 if (_k in state_dict) and (n_config_pos_emb*config.hidden_size != state_dict[_k].shape[1]): logger.info("n_config_pos_emb*config.hidden_size != state_dict[bert.embeddings.position_embeddings.weight] ({0}*{1} != {2})".format( n_config_pos_emb, config.hidden_size, state_dict[_k].shape[1])) assert state_dict[_k].shape[1] % config.hidden_size == 0 n_state_pos_emb = int(state_dict[_k].shape[1]/config.hidden_size) assert 
(n_state_pos_emb == 1) != (n_config_pos_emb == 1), "!!!!n_state_pos_emb == 1 xor n_config_pos_emb == 1!!!!" if n_state_pos_emb == 1: state_dict[_k].data = state_dict[_k].data.unsqueeze(1).repeat( 1, n_config_pos_emb, 1).reshape((config.max_position_embeddings, n_config_pos_emb*config.hidden_size)) elif n_config_pos_emb == 1: if hasattr(config, 'task_idx') and (config.task_idx is not None) and (0 <= config.task_idx <= 3): _task_idx = config.task_idx else: _task_idx = 0 state_dict[_k].data = state_dict[_k].data.view( config.max_position_embeddings, n_state_pos_emb, config.hidden_size).select(1, _task_idx) # initialize new position embeddings _k = 'bert.embeddings.position_embeddings.weight' if _k in state_dict and config.max_position_embeddings != state_dict[_k].shape[0]: logger.info("config.max_position_embeddings != state_dict[bert.embeddings.position_embeddings.weight] ({0} - {1})".format( config.max_position_embeddings, state_dict[_k].shape[0])) if config.max_position_embeddings > state_dict[_k].shape[0]: old_size = state_dict[_k].shape[0] # state_dict[_k].data = state_dict[_k].data.resize_(config.max_position_embeddings, state_dict[_k].shape[1]) state_dict[_k].resize_( config.max_position_embeddings, state_dict[_k].shape[1]) start = old_size while start < config.max_position_embeddings: chunk_size = min( old_size, config.max_position_embeddings - start) state_dict[_k].data[start:start+chunk_size, :].copy_(state_dict[_k].data[:chunk_size, :]) start += chunk_size elif config.max_position_embeddings < state_dict[_k].shape[0]: state_dict[_k].data = state_dict[_k].data[:config.max_position_embeddings, :] # initialize relax projection _k = 'cls.predictions.transform.dense.weight' n_config_relax = 1 if (config.relax_projection < 1) else config.relax_projection if (_k in state_dict) and (n_config_relax*config.hidden_size != state_dict[_k].shape[0]): logger.info("n_config_relax*config.hidden_size != state_dict[cls.predictions.transform.dense.weight] ({0}*{1} != {2})".format( n_config_relax, config.hidden_size, state_dict[_k].shape[0])) assert state_dict[_k].shape[0] % config.hidden_size == 0 n_state_relax = int(state_dict[_k].shape[0]/config.hidden_size) assert (n_state_relax == 1) != (n_config_relax == 1), "!!!!n_state_relax == 1 xor n_config_relax == 1!!!!" 
if n_state_relax == 1: _k = 'cls.predictions.transform.dense.weight' state_dict[_k].data = state_dict[_k].data.unsqueeze(0).repeat( n_config_relax, 1, 1).reshape((n_config_relax*config.hidden_size, config.hidden_size)) for _k in ('cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias'): state_dict[_k].data = state_dict[_k].data.unsqueeze( 0).repeat(n_config_relax, 1).view(-1) elif n_config_relax == 1: if hasattr(config, 'task_idx') and (config.task_idx is not None) and (0 <= config.task_idx <= 3): _task_idx = config.task_idx else: _task_idx = 0 _k = 'cls.predictions.transform.dense.weight' state_dict[_k].data = state_dict[_k].data.view( n_state_relax, config.hidden_size, config.hidden_size).select(0, _task_idx) for _k in ('cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias'): state_dict[_k].data = state_dict[_k].data.view( n_state_relax, config.hidden_size).select(0, _task_idx) # initialize QKV _all_head_size = config.num_attention_heads * \ int(config.hidden_size / config.num_attention_heads) n_config_num_qkv = 1 if (config.num_qkv < 1) else config.num_qkv for qkv_name in ('query', 'key', 'value'): _k = 'bert.encoder.layer.0.attention.self.{0}.weight'.format( qkv_name) if (_k in state_dict) and (n_config_num_qkv*_all_head_size != state_dict[_k].shape[0]): logger.info("n_config_num_qkv*_all_head_size != state_dict[_k] ({0}*{1} != {2})".format( n_config_num_qkv, _all_head_size, state_dict[_k].shape[0])) for layer_idx in range(config.num_hidden_layers): _k = 'bert.encoder.layer.{0}.attention.self.{1}.weight'.format( layer_idx, qkv_name) assert state_dict[_k].shape[0] % _all_head_size == 0 n_state_qkv = int(state_dict[_k].shape[0]/_all_head_size) assert (n_state_qkv == 1) != (n_config_num_qkv == 1), "!!!!n_state_qkv == 1 xor n_config_num_qkv == 1!!!!" 
if n_state_qkv == 1: _k = 'bert.encoder.layer.{0}.attention.self.{1}.weight'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.unsqueeze(0).repeat( n_config_num_qkv, 1, 1).reshape((n_config_num_qkv*_all_head_size, _all_head_size)) _k = 'bert.encoder.layer.{0}.attention.self.{1}.bias'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.unsqueeze( 0).repeat(n_config_num_qkv, 1).view(-1) elif n_config_num_qkv == 1: if hasattr(config, 'task_idx') and (config.task_idx is not None) and (0 <= config.task_idx <= 3): _task_idx = config.task_idx else: _task_idx = 0 assert _task_idx != 3, "[INVALID] _task_idx=3: n_config_num_qkv=1 (should be 2)" if _task_idx == 0: _qkv_idx = 0 else: _qkv_idx = 1 _k = 'bert.encoder.layer.{0}.attention.self.{1}.weight'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.view( n_state_qkv, _all_head_size, _all_head_size).select(0, _qkv_idx) _k = 'bert.encoder.layer.{0}.attention.self.{1}.bias'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.view( n_state_qkv, _all_head_size).select(0, _qkv_idx) if _word_emb_map: _k = 'bert.embeddings.word_embeddings.weight' for _tgt, _src in _word_emb_map: state_dict[_k].data[_tgt, :].copy_( state_dict[_k].data[_src, :]) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get( prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='' if hasattr(model, 'bert') else 'bert.') model.missing_keys = missing_keys if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: logger.info('\n'.join(error_msgs)) if tempdir: # Clean up temp dir shutil.rmtree(tempdir) return model class BertModel(PreTrainedBertModel): """BERT model ("Bidirectional Embedding Representations from a Transformer"). Params: config: a BertConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. 
Outputs: Tuple of (encoded_layers, pooled_output) `encoded_layers`: controled by `output_all_encoded_layers` argument: - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLF`) to train on the Next-Sentence task (see BERT's paper). ``` """ def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def rescale_some_parameters(self): for layer_id, layer in enumerate(self.encoder.layer): layer.attention.output.dense.weight.data.div_( math.sqrt(2.0*(layer_id + 1))) layer.output.dense.weight.data.div_(math.sqrt(2.0*(layer_id + 1))) def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. if attention_mask.dim() == 2: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) elif attention_mask.dim() == 3: extended_attention_mask = attention_mask.unsqueeze(1) else: raise NotImplementedError # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to( dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, mask_qkv=None, task_idx=None): extended_attention_mask = self.get_extended_attention_mask( input_ids, token_type_ids, attention_mask) embedding_output = self.embeddings( input_ids, token_type_ids, task_idx=task_idx) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, mask_qkv=mask_qkv, seg_ids=token_type_ids) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class BertModelIncr(BertModel): def __init__(self, config): super(BertModelIncr, self).__init__(config) def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None): extended_attention_mask = self.get_extended_attention_mask( input_ids, token_type_ids, attention_mask) embedding_output = self.embeddings( input_ids, token_type_ids, position_ids, task_idx=task_idx) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, seg_ids=token_type_ids) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return embedding_output, encoded_layers, pooled_output class BertForPreTraining(PreTrainedBertModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. 
Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, mask_qkv=None, task_idx=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) prediction_scores, seq_relationship_score = self.cls( sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct( seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss else: return prediction_scores, seq_relationship_score class BertPreTrainingPairTransform(nn.Module): def __init__(self, config): super(BertPreTrainingPairTransform, self).__init__() self.dense = nn.Linear(config.hidden_size*2, config.hidden_size) self.transform_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act # self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) def forward(self, pair_x, pair_y): hidden_states = torch.cat([pair_x, pair_y], dim=-1) hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) # hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertPreTrainingPairRel(nn.Module): def __init__(self, config, num_rel=0): super(BertPreTrainingPairRel, self).__init__() self.R_xy = BertPreTrainingPairTransform(config) self.rel_emb = nn.Embedding(num_rel, config.hidden_size) def forward(self, pair_x, pair_y, pair_r, pair_pos_neg_mask): # (batch, num_pair, hidden) xy = self.R_xy(pair_x, pair_y) r = self.rel_emb(pair_r) _batch, _num_pair, _hidden = xy.size() pair_score = (xy * r).sum(-1) # torch.bmm(xy.view(-1, 1, _hidden),r.view(-1, _hidden, 1)).view(_batch, _num_pair) # .mul_(-1.0): objective to loss return F.logsigmoid(pair_score * pair_pos_neg_mask.type_as(pair_score)).mul_(-1.0) class BertForPreTrainingLossMask(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config, num_labels=2, num_rel=0, num_sentlvl_labels=0, no_nsp=False): super(BertForPreTrainingLossMask, self).__init__(config) self.bert = 
BertModel(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels) self.num_sentlvl_labels = num_sentlvl_labels self.cls2 = None if self.num_sentlvl_labels > 0: self.secondary_pred_proj = nn.Embedding( num_sentlvl_labels, config.hidden_size) self.cls2 = BertPreTrainingHeads( config, self.secondary_pred_proj.weight, num_labels=num_sentlvl_labels) self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none') if no_nsp: self.crit_next_sent = None else: self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1) self.num_labels = num_labels self.num_rel = num_rel if self.num_rel > 0: self.crit_pair_rel = BertPreTrainingPairRel( config, num_rel=num_rel) if hasattr(config, 'label_smoothing') and config.label_smoothing: self.crit_mask_lm_smoothed = LabelSmoothingLoss( config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none') else: self.crit_mask_lm_smoothed = None self.apply(self.init_bert_weights) self.bert.rescale_some_parameters() def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, masked_pos=None, masked_weights=None, task_idx=None, pair_x=None, pair_x_mask=None, pair_y=None, pair_y_mask=None, pair_r=None, pair_pos_neg_mask=None, pair_loss_mask=None, masked_pos_2=None, masked_weights_2=None, masked_labels_2=None, num_tokens_a=None, num_tokens_b=None, mask_qkv=None): if token_type_ids is None and attention_mask is None: task_0 = (task_idx == 0) task_1 = (task_idx == 1) task_2 = (task_idx == 2) task_3 = (task_idx == 3) sequence_length = input_ids.shape[-1] index_matrix = torch.arange(sequence_length).view( 1, sequence_length).to(input_ids.device) num_tokens = num_tokens_a + num_tokens_b base_mask = (index_matrix < num_tokens.view(-1, 1) ).type_as(input_ids) segment_a_mask = ( index_matrix < num_tokens_a.view(-1, 1)).type_as(input_ids) token_type_ids = ( task_idx + 1 + task_3.type_as(task_idx)).view(-1, 1) * base_mask token_type_ids = token_type_ids - segment_a_mask * \ (task_0 | task_3).type_as(segment_a_mask).view(-1, 1) index_matrix = index_matrix.view(1, 1, sequence_length) index_matrix_t = index_matrix.view(1, sequence_length, 1) tril = index_matrix <= index_matrix_t attention_mask_task_0 = ( index_matrix < num_tokens.view(-1, 1, 1)) & (index_matrix_t < num_tokens.view(-1, 1, 1)) attention_mask_task_1 = tril & attention_mask_task_0 attention_mask_task_2 = torch.transpose( tril, dim0=-2, dim1=-1) & attention_mask_task_0 attention_mask_task_3 = ( (index_matrix < num_tokens_a.view(-1, 1, 1)) | tril) & attention_mask_task_0 attention_mask = (attention_mask_task_0 & task_0.view(-1, 1, 1)) | \ (attention_mask_task_1 & task_1.view(-1, 1, 1)) | \ (attention_mask_task_2 & task_2.view(-1, 1, 1)) | \ (attention_mask_task_3 & task_3.view(-1, 1, 1)) attention_mask = attention_mask.type_as(input_ids) sequence_output, pooled_output = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) def gather_seq_out_by_pos(seq, pos): return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1))) def gather_seq_out_by_pos_average(seq, pos, mask): # pos/mask: (batch, num_pair, max_token_num) batch_size, max_token_num = pos.size(0), pos.size(-1) # (batch, num_pair, max_token_num, seq.size(-1)) pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze( 2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1)) # (batch, num_pair, seq.size(-1)) mask = 
mask.type_as(pos_vec) pos_vec_masked_sum = ( pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2) return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum) def loss_mask_and_normalize(loss, mask): mask = mask.type_as(loss) loss = loss * mask denominator = torch.sum(mask) + 1e-5 return (loss / denominator).sum() if masked_lm_labels is None: if masked_pos is None: prediction_scores, seq_relationship_score = self.cls( sequence_output, pooled_output, task_idx=task_idx) else: sequence_output_masked = gather_seq_out_by_pos( sequence_output, masked_pos) prediction_scores, seq_relationship_score = self.cls( sequence_output_masked, pooled_output, task_idx=task_idx) return prediction_scores, seq_relationship_score # masked lm sequence_output_masked = gather_seq_out_by_pos( sequence_output, masked_pos) prediction_scores_masked, seq_relationship_score = self.cls( sequence_output_masked, pooled_output, task_idx=task_idx) if self.crit_mask_lm_smoothed: masked_lm_loss = self.crit_mask_lm_smoothed( F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels) else: masked_lm_loss = self.crit_mask_lm( prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels) masked_lm_loss = loss_mask_and_normalize( masked_lm_loss.float(), masked_weights) # next sentence if self.crit_next_sent is None or next_sentence_label is None: next_sentence_loss = 0.0 else: next_sentence_loss = self.crit_next_sent( seq_relationship_score.view(-1, self.num_labels).float(), next_sentence_label.view(-1)) if self.cls2 is not None and masked_pos_2 is not None: sequence_output_masked_2 = gather_seq_out_by_pos( sequence_output, masked_pos_2) prediction_scores_masked_2, _ = self.cls2( sequence_output_masked_2, None) masked_lm_loss_2 = self.crit_mask_lm( prediction_scores_masked_2.transpose(1, 2).float(), masked_labels_2) masked_lm_loss_2 = loss_mask_and_normalize( masked_lm_loss_2.float(), masked_weights_2) masked_lm_loss = masked_lm_loss + masked_lm_loss_2 if pair_x is None or pair_y is None or pair_r is None or pair_pos_neg_mask is None or pair_loss_mask is None: return masked_lm_loss, next_sentence_loss # pair and relation if pair_x_mask is None or pair_y_mask is None: pair_x_output_masked = gather_seq_out_by_pos( sequence_output, pair_x) pair_y_output_masked = gather_seq_out_by_pos( sequence_output, pair_y) else: pair_x_output_masked = gather_seq_out_by_pos_average( sequence_output, pair_x, pair_x_mask) pair_y_output_masked = gather_seq_out_by_pos_average( sequence_output, pair_y, pair_y_mask) pair_loss = self.crit_pair_rel( pair_x_output_masked, pair_y_output_masked, pair_r, pair_pos_neg_mask) pair_loss = loss_mask_and_normalize( pair_loss.float(), pair_loss_mask) return masked_lm_loss, next_sentence_loss, pair_loss class BertForExtractiveSummarization(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config): super(BertForExtractiveSummarization, self).__init__(config) self.bert = BertModel(config) self.secondary_pred_proj = nn.Embedding(2, config.hidden_size) self.cls2 = BertPreTrainingHeads( config, self.secondary_pred_proj.weight, num_labels=2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_pos_2=None, masked_weights_2=None, task_idx=None, mask_qkv=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) def gather_seq_out_by_pos(seq, pos): return torch.gather(seq, 1, 
pos.unsqueeze(2).expand(-1, -1, seq.size(-1))) sequence_output_masked_2 = gather_seq_out_by_pos( sequence_output, masked_pos_2) prediction_scores_masked_2, _ = self.cls2( sequence_output_masked_2, None, task_idx=task_idx) predicted_probs = torch.nn.functional.softmax( prediction_scores_masked_2, dim=-1) return predicted_probs, masked_pos_2, masked_weights_2 class BertForSeq2SeqDecoder(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0, search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0, forbid_duplicate_ngrams=False, forbid_ignore_set=None, not_predict_set=None, ngram_size=3, min_len=0, mode="s2s", pos_shift=False): super(BertForSeq2SeqDecoder, self).__init__(config) self.bert = BertModelIncr(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels) self.apply(self.init_bert_weights) self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none') self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1) self.mask_word_id = mask_word_id self.num_labels = num_labels self.num_rel = num_rel if self.num_rel > 0: self.crit_pair_rel = BertPreTrainingPairRel( config, num_rel=num_rel) self.search_beam_size = search_beam_size self.length_penalty = length_penalty self.eos_id = eos_id self.sos_id = sos_id self.forbid_duplicate_ngrams = forbid_duplicate_ngrams self.forbid_ignore_set = forbid_ignore_set self.not_predict_set = not_predict_set self.ngram_size = ngram_size self.min_len = min_len assert mode in ("s2s", "l2r") self.mode = mode self.pos_shift = pos_shift def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None): if self.search_beam_size > 1: return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask, task_idx=task_idx, mask_qkv=mask_qkv) input_shape = list(input_ids.size()) batch_size = input_shape[0] input_length = input_shape[1] output_shape = list(token_type_ids.size()) output_length = output_shape[1] output_ids = [] prev_embedding = None prev_encoded_layers = None curr_ids = input_ids mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id) next_pos = input_length if self.pos_shift: sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id) while next_pos < output_length: curr_length = list(curr_ids.size())[1] if self.pos_shift: if next_pos == input_length: x_input_ids = torch.cat((curr_ids, sos_ids), dim=1) start_pos = 0 else: x_input_ids = curr_ids start_pos = next_pos else: start_pos = next_pos - curr_length x_input_ids = torch.cat((curr_ids, mask_ids), dim=1) curr_token_type_ids = token_type_ids[:, start_pos:next_pos+1] curr_attention_mask = attention_mask[:, start_pos:next_pos+1, :next_pos+1] curr_position_ids = position_ids[:, start_pos:next_pos+1] new_embedding, new_encoded_layers, _ = \ self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask, output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv) last_hidden = new_encoded_layers[-1][:, -1:, :] prediction_scores, _ = self.cls( last_hidden, None, task_idx=task_idx) if self.not_predict_set: for token_id in self.not_predict_set: prediction_scores[:, :, token_id].fill_(-10000.0) _, max_ids = torch.max(prediction_scores, dim=-1) output_ids.append(max_ids) if self.pos_shift: if prev_embedding is None: prev_embedding = new_embedding else: prev_embedding = torch.cat( (prev_embedding, new_embedding), dim=1) if prev_encoded_layers is None: 
prev_encoded_layers = [x for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip( prev_encoded_layers, new_encoded_layers)] else: if prev_embedding is None: prev_embedding = new_embedding[:, :-1, :] else: prev_embedding = torch.cat( (prev_embedding, new_embedding[:, :-1, :]), dim=1) if prev_encoded_layers is None: prev_encoded_layers = [x[:, :-1, :] for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1) for x in zip(prev_encoded_layers, new_encoded_layers)] curr_ids = max_ids next_pos += 1 return torch.cat(output_ids, dim=1) def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None): input_shape = list(input_ids.size()) batch_size = input_shape[0] input_length = input_shape[1] output_shape = list(token_type_ids.size()) output_length = output_shape[1] output_ids = [] prev_embedding = None prev_encoded_layers = None curr_ids = input_ids mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id) next_pos = input_length if self.pos_shift: sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id) K = self.search_beam_size total_scores = [] beam_masks = [] step_ids = [] step_back_ptrs = [] partial_seqs = [] forbid_word_mask = None buf_matrix = None while next_pos < output_length: curr_length = list(curr_ids.size())[1] if self.pos_shift: if next_pos == input_length: x_input_ids = torch.cat((curr_ids, sos_ids), dim=1) start_pos = 0 else: x_input_ids = curr_ids start_pos = next_pos else: start_pos = next_pos - curr_length x_input_ids = torch.cat((curr_ids, mask_ids), dim=1) curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1] curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1] curr_position_ids = position_ids[:, start_pos:next_pos + 1] new_embedding, new_encoded_layers, _ = \ self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask, output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv) last_hidden = new_encoded_layers[-1][:, -1:, :] prediction_scores, _ = self.cls( last_hidden, None, task_idx=task_idx) log_scores = torch.nn.functional.log_softmax( prediction_scores, dim=-1) if forbid_word_mask is not None: log_scores += (forbid_word_mask * -10000.0) if self.min_len and (next_pos-input_length+1 <= self.min_len): log_scores[:, :, self.eos_id].fill_(-10000.0) if self.not_predict_set: for token_id in self.not_predict_set: log_scores[:, :, token_id].fill_(-10000.0) kk_scores, kk_ids = torch.topk(log_scores, k=K) if len(total_scores) == 0: k_ids = torch.reshape(kk_ids, [batch_size, K]) back_ptrs = torch.zeros(batch_size, K, dtype=torch.long) k_scores = torch.reshape(kk_scores, [batch_size, K]) else: last_eos = torch.reshape( beam_masks[-1], [batch_size * K, 1, 1]) last_seq_scores = torch.reshape( total_scores[-1], [batch_size * K, 1, 1]) kk_scores += last_eos * (-10000.0) + last_seq_scores kk_scores = torch.reshape(kk_scores, [batch_size, K * K]) k_scores, k_ids = torch.topk(kk_scores, k=K) back_ptrs = torch.div(k_ids, K) kk_ids = torch.reshape(kk_ids, [batch_size, K * K]) k_ids = torch.gather(kk_ids, 1, k_ids) step_back_ptrs.append(back_ptrs) step_ids.append(k_ids) beam_masks.append(torch.eq(k_ids, self.eos_id).float()) total_scores.append(k_scores) def first_expand(x): input_shape = list(x.size()) expanded_shape = input_shape[:1] + [1] + input_shape[1:] x = torch.reshape(x, expanded_shape) repeat_count = [1, K] + [1] * 
(len(input_shape) - 1) x = x.repeat(*repeat_count) x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:]) return x def select_beam_items(x, ids): id_shape = list(ids.size()) id_rank = len(id_shape) assert len(id_shape) == 2 x_shape = list(x.size()) x = torch.reshape(x, [batch_size, K] + x_shape[1:]) x_rank = len(x_shape) + 1 assert x_rank >= 2 if id_rank < x_rank: ids = torch.reshape( ids, id_shape + [1] * (x_rank - id_rank)) ids = ids.expand(id_shape + x_shape[1:]) y = torch.gather(x, 1, ids) y = torch.reshape(y, x_shape) return y is_first = (prev_embedding is None) if self.pos_shift: if prev_embedding is None: prev_embedding = first_expand(new_embedding) else: prev_embedding = torch.cat( (prev_embedding, new_embedding), dim=1) prev_embedding = select_beam_items( prev_embedding, back_ptrs) if prev_encoded_layers is None: prev_encoded_layers = [first_expand( x) for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip( prev_encoded_layers, new_encoded_layers)] prev_encoded_layers = [select_beam_items( x, back_ptrs) for x in prev_encoded_layers] else: if prev_embedding is None: prev_embedding = first_expand(new_embedding[:, :-1, :]) else: prev_embedding = torch.cat( (prev_embedding, new_embedding[:, :-1, :]), dim=1) prev_embedding = select_beam_items( prev_embedding, back_ptrs) if prev_encoded_layers is None: prev_encoded_layers = [first_expand( x[:, :-1, :]) for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1) for x in zip(prev_encoded_layers, new_encoded_layers)] prev_encoded_layers = [select_beam_items( x, back_ptrs) for x in prev_encoded_layers] curr_ids = torch.reshape(k_ids, [batch_size * K, 1]) if is_first: token_type_ids = first_expand(token_type_ids) position_ids = first_expand(position_ids) attention_mask = first_expand(attention_mask) mask_ids = first_expand(mask_ids) if mask_qkv is not None: mask_qkv = first_expand(mask_qkv) if self.forbid_duplicate_ngrams: wids = step_ids[-1].tolist() ptrs = step_back_ptrs[-1].tolist() if is_first: partial_seqs = [] for b in range(batch_size): for k in range(K): partial_seqs.append([wids[b][k]]) else: new_partial_seqs = [] for b in range(batch_size): for k in range(K): new_partial_seqs.append( partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]]) partial_seqs = new_partial_seqs def get_dup_ngram_candidates(seq, n): cands = set() if len(seq) < n: return [] tail = seq[-(n-1):] if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail): return [] for i in range(len(seq) - (n - 1)): mismatch = False for j in range(n - 1): if tail[j] != seq[i + j]: mismatch = True break if (not mismatch) and not(self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)): cands.add(seq[i + n - 1]) return list(sorted(cands)) if len(partial_seqs[0]) >= self.ngram_size: dup_cands = [] for seq in partial_seqs: dup_cands.append( get_dup_ngram_candidates(seq, self.ngram_size)) if max(len(x) for x in dup_cands) > 0: if buf_matrix is None: vocab_size = list(log_scores.size())[-1] buf_matrix = np.zeros( (batch_size * K, vocab_size), dtype=float) else: buf_matrix.fill(0) for bk, cands in enumerate(dup_cands): for i, wid in enumerate(cands): buf_matrix[bk, wid] = 1.0 forbid_word_mask = torch.tensor( buf_matrix, dtype=log_scores.dtype) forbid_word_mask = torch.reshape( forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda() else: forbid_word_mask = None next_pos += 1 # [(batch, beam)] total_scores = [x.tolist() for x in total_scores] step_ids = 
[x.tolist() for x in step_ids] step_back_ptrs = [x.tolist() for x in step_back_ptrs] # back tracking traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []} for b in range(batch_size): # [(beam,)] scores = [x[b] for x in total_scores] wids_list = [x[b] for x in step_ids] ptrs = [x[b] for x in step_back_ptrs] traces['scores'].append(scores) traces['wids'].append(wids_list) traces['ptrs'].append(ptrs) # first we need to find the eos frame where all symbols are eos # any frames after the eos frame are invalid last_frame_id = len(scores) - 1 for i, wids in enumerate(wids_list): if all(wid == self.eos_id for wid in wids): last_frame_id = i break max_score = -math.inf frame_id = -1 pos_in_frame = -1 for fid in range(last_frame_id + 1): for i, wid in enumerate(wids_list[fid]): if wid == self.eos_id or fid == last_frame_id: s = scores[fid][i] if self.length_penalty > 0: s /= math.pow((5 + fid + 1) / 6.0, self.length_penalty) if s > max_score: max_score = s frame_id = fid pos_in_frame = i if frame_id == -1: traces['pred_seq'].append([0]) else: seq = [wids_list[frame_id][pos_in_frame]] for fid in range(frame_id, 0, -1): pos_in_frame = ptrs[fid][pos_in_frame] seq.append(wids_list[fid - 1][pos_in_frame]) seq.reverse() traces['pred_seq'].append(seq) def _pad_sequence(sequences, max_len, padding_value=0): trailing_dims = sequences[0].size()[1:] out_dims = (len(sequences), max_len) + trailing_dims out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value) for i, tensor in enumerate(sequences): length = tensor.size(0) # use index notation to prevent duplicate references to the tensor out_tensor[i, :length, ...] = tensor return out_tensor # convert to tensors for DataParallel for k in ('pred_seq', 'scores', 'wids', 'ptrs'): ts_list = traces[k] if not isinstance(ts_list[0], torch.Tensor): dt = torch.float if k == 'scores' else torch.long ts_list = [torch.tensor(it, dtype=dt) for it in ts_list] traces[k] = _pad_sequence( ts_list, output_length, padding_value=0).to(input_ids.device) return traces class BertForMaskedLM(PreTrainedBertModel): """BERT model with the masked language modeling head. This module comprises the BERT model followed by the masked language modeling head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] Outputs: if `masked_lm_labels` is `None`: Outputs the masked language modeling loss. 
if `masked_lm_labels` is `None`: Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForMaskedLM(config) masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead( config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, mask_qkv=None, task_idx=None): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) prediction_scores = self.cls(sequence_output) if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) return masked_lm_loss else: return prediction_scores class BertForNextSentencePrediction(PreTrainedBertModel): """BERT model with next sentence prediction head. This module comprises the BERT model followed by the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `next_sentence_label` is not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `next_sentence_label` is `None`: Outputs the next sentence classification logits of shape [batch_size, 2]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForNextSentencePrediction(config) seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, mask_qkv=None, task_idx=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) seq_relationship_score = self.cls(pooled_output) if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct( seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) return next_sentence_loss else: return seq_relationship_score class BertForSequenceClassification(PreTrainedBertModel): """BERT model for classification. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForSequenceClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels=2): super(BertForSequenceClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, mask_qkv=None, task_idx=None): _, pooled_output = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) if labels is not None: if labels.dtype == torch.long: loss_fct = CrossEntropyLoss() loss = loss_fct( logits.view(-1, self.num_labels), labels.view(-1)) elif labels.dtype == torch.half or labels.dtype == torch.float: loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: print('unkown labels.dtype') loss = None return loss else: return logits class BertForMultipleChoice(PreTrainedBertModel): """BERT model for multiple choice tasks. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_choices`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_choices]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_choices = 2 model = BertForMultipleChoice(config, num_choices) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_choices=2): super(BertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, mask_qkv=None, task_idx=None): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert( flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return loss else: return reshaped_logits class BertForTokenClassification(PreTrainedBertModel): """BERT model for token-level classification. This module is composed of the BERT model with a linear layer on top of the full hidden state of the last layer. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForTokenClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels=2): super(BertForTokenClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, mask_qkv=None, task_idx=None): sequence_output, _ = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct( logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForQuestionAnswering(PreTrainedBertModel): """BERT model for Question Answering (span extraction). This module is composed of the BERT model with a linear layer on top of the sequence output that computes start_logits and end_logits Params: `config`: either - a BertConfig class instance with the configuration to build a new model, or - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-base-multilingual` . `bert-base-chinese` The pre-trained model will be downloaded and cached if needed. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. 
Outputs: if `start_positions` and `end_positions` are not `None`: Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. if `start_positions` or `end_positions` is `None`: Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end position tokens of shape [batch_size, sequence_length]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForQuestionAnswering(config) start_logits, end_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.bert = BertModel(config) # self.dropout = nn.Dropout(config.hidden_dropout_prob) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, task_idx=None): sequence_output, _ = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, task_idx=task_idx) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits
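# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It illustrates the
# two output modes of BertForQuestionAnswering defined above: span logits when
# no labels are given, and an averaged start/end cross-entropy loss when they
# are. The config values and tensor contents are illustrative assumptions
# only, mirroring the docstring examples elsewhere in this file.
# ---------------------------------------------------------------------------
def _example_question_answering_usage():
    import torch
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12,
                        intermediate_size=3072)
    model = BertForQuestionAnswering(config)
    # Toy inputs already converted to WordPiece ids (batch of 2, length 3).
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    # Without labels: a tuple (start_logits, end_logits), each [batch, seq_len].
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    # With labels: a single scalar, the mean of the start and end CE losses;
    # positions outside the sequence are clamped and ignored by the loss.
    start_positions = torch.LongTensor([1, 0])
    end_positions = torch.LongTensor([2, 1])
    total_loss = model(input_ids, token_type_ids, input_mask,
                       start_positions=start_positions,
                       end_positions=end_positions)
    return start_logits, end_logits, total_loss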
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/modeling.py
# coding: utf8
def main():
    import sys
    try:
        from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
    except ModuleNotFoundError:
        print("pytorch_pretrained_bert can only be used from the command line to convert TensorFlow models to PyTorch. "
              "In that case, it requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise

    if len(sys.argv) != 5:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
    else:
        # Pop the three path arguments from the end of argv (output path first).
        PYTORCH_DUMP_OUTPUT = sys.argv.pop()
        TF_CONFIG = sys.argv.pop()
        TF_CHECKPOINT = sys.argv.pop()
        convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)


if __name__ == '__main__':
    main()
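# Editor's note (hedged example, not part of the original file): assuming the
# package exposes this module as the `pytorch_pretrained_bert` console entry
# point, a typical invocation would look roughly like
#
#   pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch \
#       /path/to/bert_model.ckpt /path/to/bert_config.json /path/to/pytorch_model.bin
#
# where the three trailing (hypothetical) paths map to TF_CHECKPOINT, TF_CONFIG
# and PYTORCH_DUMP_OUTPUT via the sys.argv.pop() calls above.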
data2vec_vision-main
unilm-v1/src/pytorch_pretrained_bert/__main__.py
#!/usr/bin/env python from __future__ import print_function __author__ = 'xinya' from bleu.bleu import Bleu from meteor.meteor import Meteor from rouge.rouge import Rouge from cider.cider import Cider from collections import defaultdict from argparse import ArgumentParser import string import sys reload(sys) sys.setdefaultencoding('utf-8') _tok_dict = {"(": "-lrb-", ")": "-rrb-", "[": "-lsb-", "]": "-rsb-", "{": "-lcb-", "}": "-rcb-", "[UNK]": "UNK", '&': '&amp;', '<': '&lt;', '>': '&gt;'} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) class QGEvalCap: def __init__(self, gts, res): self.gts = gts self.res = res def evaluate(self): output = [] scorers = [ (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]), (Meteor(), "METEOR"), (Rouge(), "ROUGE_L"), # (Cider(), "CIDEr") ] # ================================================= # Compute scores # ================================================= for scorer, method in scorers: # print 'computing %s score...'%(scorer.method()) score, scores = scorer.compute_score(self.gts, self.res) if type(method) == list: for sc, scs, m in zip(score, scores, method): print("%s: %0.5f" % (m, sc)) output.append(sc) else: print("%s: %0.5f" % (method, score)) output.append(score) return output def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500): """ Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt """ pairs = [] with open(src_file, 'r') as infile: for line in infile: pair = {} pair['tokenized_sentence'] = line[:-1].strip().lower() pairs.append(pair) with open(tgt_file, "r") as infile: cnt = 0 for line in infile: pairs[cnt]['tokenized_question'] = " ".join( detokenize(line[:-1].strip().split())).lower() cnt += 1 output = [] with open(out_file, 'r') as infile: for line in infile: line = line[:-1].strip().lower() output.append(line) for idx, pair in enumerate(pairs): pair['prediction'] = output[idx] # eval from eval import QGEvalCap import json from json import encoder encoder.FLOAT_REPR = lambda o: format(o, '.4f') res = defaultdict(lambda: []) gts = defaultdict(lambda: []) for pair in pairs[:]: key = pair['tokenized_sentence'] res[key] = [pair['prediction'].encode('utf-8')] # gts gts[key].append(pair['tokenized_question'].encode('utf-8')) QGEval = QGEvalCap(gts, res) return QGEval.evaluate() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("-out", "--out_file", dest="out_file", default="./output/pred.txt", help="output file to compare") parser.add_argument("-src", "--src_file", dest="src_file", default="./qg_data/test/test.pa.txt", help="src file") parser.add_argument("-tgt", "--tgt_file", dest="tgt_file", default="./qg_data/test/test.q.tok.txt", help="target file") args = parser.parse_args() print("scores: \n") eval(args.out_file, args.src_file, args.tgt_file)
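# Editor's sketch (not part of the original script; the paths are simply the
# argparse defaults above, and Python 2 is assumed because of the
# reload(sys)/sys.setdefaultencoding calls): a typical invocation that scores
# predictions against the UniLM-tokenized references would look like
#
#   python eval_on_unilm_tokenized_ref.py \
#       --out_file ./output/pred.txt \
#       --src_file ./qg_data/test/test.pa.txt \
#       --tgt_file ./qg_data/test/test.q.tok.txt
#
# QGEvalCap.evaluate() then prints Bleu_1..Bleu_4, METEOR and ROUGE_L.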
data2vec_vision-main
unilm-v1/src/qg/eval_on_unilm_tokenized_ref.py
#!/usr/bin/env python from __future__ import print_function __author__ = 'xinya' from bleu.bleu import Bleu from meteor.meteor import Meteor from rouge.rouge import Rouge from cider.cider import Cider from collections import defaultdict from argparse import ArgumentParser import string import sys reload(sys) sys.setdefaultencoding('utf-8') _tok_dict = {"(": "-lrb-", ")": "-rrb-", "[": "-lsb-", "]": "-rsb-", "{": "-lcb-", "}": "-rcb-", "[UNK]": "UNK", '&': '&amp;', '<': '&lt;', '>': '&gt;'} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) class QGEvalCap: def __init__(self, gts, res): self.gts = gts self.res = res def evaluate(self): output = [] scorers = [ (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]), (Meteor(), "METEOR"), (Rouge(), "ROUGE_L"), # (Cider(), "CIDEr") ] # ================================================= # Compute scores # ================================================= for scorer, method in scorers: # print 'computing %s score...'%(scorer.method()) score, scores = scorer.compute_score(self.gts, self.res) if type(method) == list: for sc, scs, m in zip(score, scores, method): print("%s: %0.5f" % (m, sc)) output.append(sc) else: print("%s: %0.5f" % (method, score)) output.append(score) return output def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500): """ Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt """ pairs = [] with open(src_file, 'r') as infile: for line in infile: pair = {} pair['tokenized_sentence'] = line[:-1].strip().lower() pairs.append(pair) with open(tgt_file, "r") as infile: cnt = 0 for line in infile: pairs[cnt]['tokenized_question'] = line[:-1].strip() cnt += 1 output = [] with open(out_file, 'r') as infile: for line in infile: line = fix_tokenization(line[:-1].strip()).lower() output.append(line) for idx, pair in enumerate(pairs): pair['prediction'] = output[idx] # eval from eval import QGEvalCap import json from json import encoder encoder.FLOAT_REPR = lambda o: format(o, '.4f') res = defaultdict(lambda: []) gts = defaultdict(lambda: []) for pair in pairs[:]: key = pair['tokenized_sentence'] res[key] = [pair['prediction'].encode('utf-8')] # gts gts[key].append(pair['tokenized_question'].encode('utf-8')) QGEval = QGEvalCap(gts, res) return QGEval.evaluate() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("-out", "--out_file", dest="out_file", default="./output/pred.txt", help="output file to compare") parser.add_argument("-src", "--src_file", dest="src_file", default="./qg_data/test/test.pa.txt", help="src file") parser.add_argument("-tgt", "--tgt_file", dest="tgt_file", default="./qg_data/nqg_processed_data/tgt-test.txt", help="target file") args = parser.parse_args() print("scores: \n") eval(args.out_file, args.src_file, args.tgt_file)
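# Editor's sketch (not part of the original script; hypothetical invocation,
# Python 2 assumed because of the reload(sys) call above): unlike the variant
# that reads pre-tokenized references, this script detokenizes predictions
# with fix_tokenization() and compares them against the raw NQG target file:
#
#   python eval.py --out_file ./output/pred.txt \
#       --src_file ./qg_data/test/test.pa.txt \
#       --tgt_file ./qg_data/nqg_processed_data/tgt-test.txt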
data2vec_vision-main
unilm-v1/src/qg/eval.py
data2vec_vision-main
unilm-v1/src/gigaword/__init__.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np # pip install py-rouge import rouge import time import tempfile import shutil from pytorch_pretrained_bert.tokenization import BertTokenizer # pip install pyrouge from gigaword.bs_pyrouge import Rouge155 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--gold", type=str, help="Gold output file.") parser.add_argument("--pred", type=str, help="Input prediction file.") parser.add_argument("--split", type=str, default="", help="Data split (train/dev/test).") parser.add_argument("--save_best", action='store_true', help="Save best epoch.") parser.add_argument("--only_eval_best", action='store_true', help="Only evaluate best epoch.") parser.add_argument("--trunc_len", type=int, default=0, help="Truncate line by the maximum length.") default_process_count = max(1, cpu_count() - 1) parser.add_argument("--processes", type=int, default=default_process_count, help="Number of processes to use (default %(default)s)") parser.add_argument("--perl", action='store_true', help="Using the perl script.") parser.add_argument('--lazy_eval', action='store_true', help="Skip evaluation if the .rouge file exists.") args = parser.parse_args() SPECIAL_TOKEN = ["[UNK]", "[PAD]", "[CLS]", "[MASK]"] evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def rouge_results_to_str(results_dict): return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format( results_dict["rouge_1_f_score"] * 100, results_dict["rouge_2_f_score"] * 100, results_dict["rouge_l_f_score"] * 100, results_dict["rouge_1_recall"] * 100, results_dict["rouge_2_recall"] * 100, results_dict["rouge_l_recall"] * 100 ) def count_tokens(tokens): counter = {} for t in tokens: if t in counter.keys(): counter[t] += 1 else: counter[t] = 1 return counter def get_f1(text_a, text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or 
len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5) _tok_dict = {"(": "-lrb-", ")": "-rrb-", "[": "-lsb-", "]": "-rsb-", "{": "-lcb-", "}": "-rcb-", "[UNK]": "UNK", '&': '&amp;', '<': '&lt;', '>': '&gt;'} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip() gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] sentence = fix_tokenization(l.strip()).replace('1', '#') buf.append(sentence) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores def main(): if args.perl: eval_fn_list = list(glob.glob(args.pred)) else: eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not( args.lazy_eval and Path(eval_fn+".rouge").exists())] eval_fn_list = list(filter(lambda fn: not(fn.endswith( '.post') or fn.endswith('.rouge')), eval_fn_list)) if args.only_eval_best: best_epoch_dict = {} for dir_path in set(Path(fn).parent for fn in eval_fn_list): fn_save = os.path.join(dir_path, 'save_best.dev') if Path(fn_save).exists(): with open(fn_save, 'r') as f_in: __, o_name, __ = f_in.read().strip().split('\n') epoch = o_name.split('.')[1] best_epoch_dict[dir_path] = epoch new_eval_fn_list = [] for fn in eval_fn_list: dir_path = Path(fn).parent if dir_path in best_epoch_dict: if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]: new_eval_fn_list.append(fn) eval_fn_list = new_eval_fn_list logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list)) num_pool = min(args.processes, len(eval_fn_list)) p = Pool(num_pool) r_list = p.imap_unordered(process_eval, eval_fn_list) r_list = sorted([(fn, scores) for fn, scores in r_list], key=lambda x: x[0]) rg2_dict = {} for fn, scores in r_list: print(fn) if args.perl: print(rouge_results_to_str(scores)) else: rg2_dict[fn] = scores['rouge-2']['f'] print( "ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f'])) with open(fn+".rouge", 'w') as f_out: f_out.write(json.dumps( {'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']})) p.close() p.join() if args.save_best: # find best results group_dict = {} for k, v in rg2_dict.items(): d_name, o_name = Path(k).parent, Path(k).name if (d_name not in group_dict) or (v > group_dict[d_name][1]): group_dict[d_name] = 
(o_name, v) # compare and save the best result for k, v in group_dict.items(): fn = os.path.join(k, 'save_best.'+args.split) o_name_s, rst_s = v should_save = True if Path(fn).exists(): with open(fn, 'r') as f_in: rst_f = float(f_in.read().strip().split('\n')[-1]) if rst_s <= rst_f: should_save = False if should_save: with open(fn, 'w') as f_out: f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s)) if __name__ == "__main__": main()
data2vec_vision-main
unilm-v1/src/gigaword/eval.py
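The gigaword evaluation script above normalizes decoder output with fix_tokenization before ROUGE scoring: brackets become PTB-style tokens, XML-unsafe characters are escaped, and split contractions, numbers, and acronyms are re-joined. A minimal hand-traced sketch, not part of the repository; the import path assumes unilm-v1/src is on PYTHONPATH, and importing the module parses its command-line flags at import time.

from gigaword.eval import fix_tokenization  # module-level argparse runs on import

# "( update )" -> "-lrb- update -rrb-", "don ' t" -> "do n't", "&" -> "&amp;"
print(fix_tokenization("( update ) he don ' t know & more"))
# -lrb- update -rrb- he do n't know &amp; more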
from __future__ import print_function, unicode_literals, division import os import re import codecs import platform from subprocess import check_output from tempfile import mkdtemp from functools import partial try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser from pyrouge.utils import log from pyrouge.utils.file_utils import verify_dir REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}", "-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'} def clean(x): return re.sub( r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''", lambda m: REMAP.get(m.group()), x) class DirectoryProcessor: @staticmethod def process(input_dir, output_dir, function): """ Apply function to all files in input_dir and save the resulting ouput files in output_dir. """ if not os.path.exists(output_dir): os.makedirs(output_dir) logger = log.get_global_console_logger() logger.info("Processing files in {}.".format(input_dir)) input_file_names = os.listdir(input_dir) for input_file_name in input_file_names: input_file = os.path.join(input_dir, input_file_name) with codecs.open(input_file, "r", encoding="UTF-8") as f: input_string = f.read() output_string = function(input_string) output_file = os.path.join(output_dir, input_file_name) with codecs.open(output_file, "w", encoding="UTF-8") as f: f.write(clean(output_string.lower())) logger.info("Saved processed files to {}.".format(output_dir)) class Rouge155(object): """ This is a wrapper for the ROUGE 1.5.5 summary evaluation package. This class is designed to simplify the evaluation process by: 1) Converting summaries into a format ROUGE understands. 2) Generating the ROUGE configuration file automatically based on filename patterns. This class can be used within Python like this: rouge = Rouge155() rouge.system_dir = 'test/systems' rouge.model_dir = 'test/models' # The system filename pattern should contain one group that # matches the document ID. rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html' # The model filename pattern has '#ID#' as a placeholder for the # document ID. If there are multiple model summaries, pyrouge # will use the provided regex to automatically match them with # the corresponding system summary. Here, [A-Z] matches # multiple model summaries for a given #ID#. rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate() print(rouge_output) output_dict = rouge.output_to_dict(rouge_ouput) print(output_dict) -> {'rouge_1_f_score': 0.95652, 'rouge_1_f_score_cb': 0.95652, 'rouge_1_f_score_ce': 0.95652, 'rouge_1_precision': 0.95652, [...] To evaluate multiple systems: rouge = Rouge155() rouge.system_dir = '/PATH/TO/systems' rouge.model_dir = 'PATH/TO/models' for system_id in ['id1', 'id2', 'id3']: rouge.system_filename_pattern = \ 'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id) rouge.model_filename_pattern = \ 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate(system_id) print(rouge_output) """ def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None): """ Create a Rouge155 object. rouge_dir: Directory containing Rouge-1.5.5.pl rouge_args: Arguments to pass through to ROUGE if you don't want to use the default pyrouge arguments. 
""" self.temp_dir = temp_dir self.log = log.get_global_console_logger() self.__set_dir_properties() self._config_file = None self._settings_file = self.__get_config_path() self.__set_rouge_dir(rouge_dir) self.args = self.__clean_rouge_args(rouge_args) self._system_filename_pattern = None self._model_filename_pattern = None def save_home_dir(self): config = ConfigParser() section = 'pyrouge settings' config.add_section(section) config.set(section, 'home_dir', self._home_dir) with open(self._settings_file, 'w') as f: config.write(f) self.log.info("Set ROUGE home directory to {}.".format(self._home_dir)) @property def settings_file(self): """ Path of the setttings file, which stores the ROUGE home dir. """ return self._settings_file @property def bin_path(self): """ The full path of the ROUGE binary (although it's technically a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl """ if self._bin_path is None: raise Exception( "ROUGE path not set. Please set the ROUGE home directory " "and ensure that ROUGE-1.5.5.pl exists in it.") return self._bin_path @property def system_filename_pattern(self): """ The regular expression pattern for matching system summary filenames. The regex string. E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. Currently, there is no support for multiple systems. """ return self._system_filename_pattern @system_filename_pattern.setter def system_filename_pattern(self, pattern): self._system_filename_pattern = pattern @property def model_filename_pattern(self): """ The regular expression pattern for matching model summary filenames. The pattern needs to contain the string "#ID#", which is a placeholder for the document ID. E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. "#ID#" is a placeholder for the document ID which has been matched by the "(\d+)" part of the system filename pattern. The different model summaries for a given document ID are matched by the "[A-Z]" part. """ return self._model_filename_pattern @model_filename_pattern.setter def model_filename_pattern(self, pattern): self._model_filename_pattern = pattern @property def config_file(self): return self._config_file @config_file.setter def config_file(self, path): config_dir, _ = os.path.split(path) verify_dir(config_dir, "configuration file") self._config_file = path def split_sentences(self): """ ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used. """ from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter() def sent_split_to_string(s): return "\n".join(ss.split(s)) process_func = partial( DirectoryProcessor.process, function=sent_split_to_string) self.__process_summaries(process_func) @staticmethod def convert_summaries_to_rouge_format(input_dir, output_dir): """ Convert all files in input_dir into a format ROUGE understands and saves the files to output_dir. The input files are assumed to be plain text with one sentence per line. input_dir: Path of directory containing the input files. output_dir: Path of directory in which the converted files will be saved. 
""" DirectoryProcessor.process( input_dir, output_dir, Rouge155.convert_text_to_rouge_format) @staticmethod def convert_text_to_rouge_format(text, title="dummy title"): """ Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line. text: The text to convert, containg one sentence per line. title: Optional title for the text. The title will appear in the converted file, but doesn't seem to have any other relevance. Returns: The converted text as string. """ sentences = text.split("\n") sent_elems = [ "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>" "{text}</a>".format(i=i, text=sent) for i, sent in enumerate(sentences, start=1)] html = """<html> <head> <title>{title}</title> </head> <body bgcolor="white"> {elems} </body> </html>""".format(title=title, elems="\n".join(sent_elems)) return html @staticmethod def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ system_filenames = [f for f in os.listdir(system_dir)] system_models_tuples = [] system_filename_pattern = re.compile(system_filename_pattern) for system_filename in sorted(system_filenames): match = system_filename_pattern.match(system_filename) if match: id = match.groups(0)[0] model_filenames = [model_filename_pattern.replace('#ID#', id)] # model_filenames = Rouge155.__get_model_filenames_for_id( # id, model_dir, model_filename_pattern) system_models_tuples.append( (system_filename, sorted(model_filenames))) if not system_models_tuples: raise Exception( "Did not find any files matching the pattern {} " "in the system summaries directory {}.".format( system_filename_pattern.pattern, system_dir)) with codecs.open(config_file_path, 'w', encoding='utf-8') as f: f.write('<ROUGE-EVAL version="1.55">') for task_id, (system_filename, model_filenames) in enumerate( system_models_tuples, start=1): eval_string = Rouge155.__get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames) f.write(eval_string) f.write("</ROUGE-EVAL>") def write_config(self, config_file_path=None, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. 
""" if not system_id: system_id = 1 if (not config_file_path) or (not self._config_dir): self._config_dir = mkdtemp(dir=self.temp_dir) config_filename = "rouge_conf.xml" else: config_dir, config_filename = os.path.split(config_file_path) verify_dir(config_dir, "configuration file") self._config_file = os.path.join(self._config_dir, config_filename) Rouge155.write_config_static( self._system_dir, self._system_filename_pattern, self._model_dir, self._model_filename_pattern, self._config_file, system_id) self.log.info( "Written ROUGE configuration to {}".format(self._config_file)) def evaluate(self, system_id=1, rouge_args=None): """ Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string. """ self.write_config(system_id=system_id) options = self.__get_options(rouge_args) command = [self._bin_path] + options self.log.info( "Running ROUGE with command {}".format(" ".join(command))) rouge_output = check_output(command).decode("UTF-8") return rouge_output def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None): """ Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string. """ if split_sentences: self.split_sentences() self.__write_summaries() rouge_output = self.evaluate(system_id, rouge_args) return rouge_output def output_to_dict(self, output): """ Convert the ROUGE output into python dictionary for further processing. """ # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) " r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)") results = {} for line in output.split("\n"): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace("-", '_') key = "{}_{}".format(rouge_type, measure) results[key] = float(result) results["{}_cb".format(key)] = float(conf_begin) results["{}_ce".format(key)] = float(conf_end) return results ################################################################### # Private methods def __set_rouge_dir(self, home_dir=None): """ Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths. """ if not home_dir: self._home_dir = self.__get_rouge_home_dir_from_settings() else: self._home_dir = home_dir self.save_home_dir() self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl') self.data_dir = os.path.join(self._home_dir, 'data') if not os.path.exists(self._bin_path): raise Exception( "ROUGE binary not found at {}. 
Please set the " "correct path by running pyrouge_set_rouge_path " "/path/to/rouge/home.".format(self._bin_path)) def __get_rouge_home_dir_from_settings(self): config = ConfigParser() with open(self._settings_file) as f: if hasattr(config, "read_file"): config.read_file(f) else: # use deprecated python 2.x method config.readfp(f) rouge_home_dir = config.get('pyrouge settings', 'home_dir') return rouge_home_dir @staticmethod def __get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames): """ ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries. """ peer_elems = "<P ID=\"{id}\">{name}</P>".format( id=system_id, name=system_filename) model_elems = ["<M ID=\"{id}\">{name}</M>".format( id=chr(65 + i), name=name) for i, name in enumerate(model_filenames)] model_elems = "\n\t\t\t".join(model_elems) eval_string = """ <EVAL ID="{task_id}"> <MODEL-ROOT>{model_root}</MODEL-ROOT> <PEER-ROOT>{peer_root}</PEER-ROOT> <INPUT-FORMAT TYPE="SEE"> </INPUT-FORMAT> <PEERS> {peer_elems} </PEERS> <MODELS> {model_elems} </MODELS> </EVAL> """.format( task_id=task_id, model_root=model_dir, model_elems=model_elems, peer_root=system_dir, peer_elems=peer_elems) return eval_string def __process_summaries(self, process_func): """ Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders. """ temp_dir = mkdtemp(dir=self.temp_dir) new_system_dir = os.path.join(temp_dir, "system") os.mkdir(new_system_dir) new_model_dir = os.path.join(temp_dir, "model") os.mkdir(new_model_dir) self.log.info( "Processing summaries. Saving system files to {} and " "model files to {}.".format(new_system_dir, new_model_dir)) process_func(self._system_dir, new_system_dir) process_func(self._model_dir, new_model_dir) self._system_dir = new_system_dir self._model_dir = new_model_dir def __write_summaries(self): self.log.info("Writing summaries.") self.__process_summaries(self.convert_summaries_to_rouge_format) @staticmethod def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern): pattern = re.compile(model_filenames_pattern.replace('#ID#', id)) model_filenames = [ f for f in os.listdir(model_dir) if pattern.match(f)] if not model_filenames: raise Exception( "Could not find any model summaries for the system" " summary with ID {}. Specified model filename pattern was: " "{}".format(id, model_filenames_pattern)) return model_filenames def __get_options(self, rouge_args=None): """ Get supplied command line arguments for ROUGE or use default ones. """ if self.args: options = self.args.split() elif rouge_args: options = rouge_args.split() else: options = [ '-e', self._data_dir, '-c', 95, # '-2', # '-1', # '-U', '-m', # '-v', '-r', 1000, '-n', 2, # '-w', 1.2, '-a', ] options = list(map(str, options)) options = self.__add_config_option(options) return options def __create_dir_property(self, dir_name, docstring): """ Generate getter and setter for a directory property. 
""" property_name = "{}_dir".format(dir_name) private_name = "_" + property_name setattr(self, private_name, None) def fget(self): return getattr(self, private_name) def fset(self, path): verify_dir(path, dir_name) setattr(self, private_name, path) p = property(fget=fget, fset=fset, doc=docstring) setattr(self.__class__, property_name, p) def __set_dir_properties(self): """ Automatically generate the properties for directories. """ directories = [ ("home", "The ROUGE home directory."), ("data", "The path of the ROUGE 'data' directory."), ("system", "Path of the directory containing system summaries."), ("model", "Path of the directory containing model summaries."), ] for (dirname, docstring) in directories: self.__create_dir_property(dirname, docstring) def __clean_rouge_args(self, rouge_args): """ Remove enclosing quotation marks, if any. """ if not rouge_args: return quot_mark_pattern = re.compile('"(.+)"') match = quot_mark_pattern.match(rouge_args) if match: cleaned_args = match.group(1) return cleaned_args else: return rouge_args def __add_config_option(self, options): return options + [self._config_file] def __get_config_path(self): if platform.system() == "Windows": parent_dir = os.getenv("APPDATA") config_dir_name = "pyrouge" elif os.name == "posix": parent_dir = os.path.expanduser("~") config_dir_name = ".pyrouge" else: parent_dir = os.path.dirname(__file__) config_dir_name = "" config_dir = os.path.join(parent_dir, config_dir_name) if not os.path.exists(config_dir): os.makedirs(config_dir) return os.path.join(config_dir, 'settings.ini') if __name__ == "__main__": import argparse from utils.argparsers import rouge_path_parser parser = argparse.ArgumentParser(parents=[rouge_path_parser]) args = parser.parse_args() rouge = Rouge155(args.rouge_home) rouge.save_home_dir()
data2vec_vision-main
unilm-v1/src/gigaword/bs_pyrouge.py
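Rouge155 above is driven the same way test_rouge uses it in eval.py: point system_dir at a directory of candidate summaries, model_dir at the references, run the evaluation, and parse the textual report into a dict. A minimal sketch, not part of the repository; directories and filename patterns are placeholders, and it assumes ROUGE-1.5.5 is installed with its home directory registered (e.g. via pyrouge_set_rouge_path).

from gigaword.bs_pyrouge import Rouge155

r = Rouge155(temp_dir='/tmp')                  # placeholder temp dir
r.system_dir = '/tmp/rouge/candidate'          # one plain-text summary per file
r.model_dir = '/tmp/rouge/reference'           # matching reference summaries
r.system_filename_pattern = r'cand.(\d+).txt'  # the group captures the document ID
r.model_filename_pattern = 'ref.#ID#.txt'      # '#ID#' is filled with that ID
report = r.convert_and_evaluate()              # converts to ROUGE HTML, runs ROUGE-1.5.5.pl
scores = r.output_to_dict(report)              # e.g. scores['rouge_2_f_score']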
data2vec_vision-main
unilm-v1/src/cnndm/__init__.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np # pip install py-rouge import rouge import time import tempfile import shutil from pytorch_pretrained_bert.tokenization import BertTokenizer # pip install pyrouge from cnndm.bs_pyrouge import Rouge155 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--gold", type=str, help="Gold output file.") parser.add_argument("--pred", type=str, help="Input prediction file.") parser.add_argument("--split", type=str, default="", help="Data split (train/dev/test).") parser.add_argument("--save_best", action='store_true', help="Save best epoch.") parser.add_argument("--only_eval_best", action='store_true', help="Only evaluate best epoch.") parser.add_argument("--trunc_len", type=int, default=60, help="Truncate line by the maximum length.") parser.add_argument("--duplicate_rate", type=float, default=0.7, help="If the duplicat rate (compared with history) is large, we can discard the current sentence.") default_process_count = max(1, cpu_count() - 1) parser.add_argument("--processes", type=int, default=default_process_count, help="Number of processes to use (default %(default)s)") parser.add_argument("--perl", action='store_true', help="Using the perl script.") parser.add_argument('--lazy_eval', action='store_true', help="Skip evaluation if the .rouge file exists.") args = parser.parse_args() SPECIAL_TOKEN = ["[UNK]", "[PAD]", "[CLS]", "[MASK]"] evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def rouge_results_to_str(results_dict): return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format( results_dict["rouge_1_f_score"] * 100, results_dict["rouge_2_f_score"] * 100, results_dict["rouge_l_f_score"] * 100, results_dict["rouge_1_recall"] * 100, results_dict["rouge_2_recall"] * 100, results_dict["rouge_l_recall"] * 100 ) def count_tokens(tokens): counter = {} for t in tokens: if t in counter.keys(): counter[t] += 1 
else: counter[t] = 1 return counter def get_f1(text_a, text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5) _tok_dict = {"(": "-LRB-", ")": "-RRB-", "[": "-LSB-", "]": "-RSB-", "{": "-LCB-", "}": "-RCB-"} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def remove_duplicate(l_list, duplicate_rate): tk_list = [l.lower().split() for l in l_list] r_list = [] history_set = set() for i, w_list in enumerate(tk_list): w_set = set(w_list) if len(w_set & history_set)/len(w_set) <= duplicate_rate: r_list.append(l_list[i]) history_set |= w_set return r_list def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip().replace(" <S_SEP> ", '\n') gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] for sentence in l.strip().split("[X_SEP]"): sentence = fix_tokenization(sentence) if any(get_f1(sentence, s) > 1.0 for s in buf): continue s_len = len(sentence.split()) if s_len <= 4: continue buf.append(sentence) if args.duplicate_rate and args.duplicate_rate < 1: buf = remove_duplicate(buf, args.duplicate_rate) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.replace('\n', ' [X_SEP] ').strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores def main(): if args.perl: eval_fn_list = list(glob.glob(args.pred)) else: eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not( args.lazy_eval and Path(eval_fn+".rouge").exists())] eval_fn_list = list(filter(lambda fn: not(fn.endswith( '.post') or fn.endswith('.rouge')), eval_fn_list)) if args.only_eval_best: best_epoch_dict = {} for dir_path in set(Path(fn).parent for fn in eval_fn_list): fn_save = os.path.join(dir_path, 'save_best.dev') if Path(fn_save).exists(): with open(fn_save, 'r') as f_in: __, o_name, __ = f_in.read().strip().split('\n') epoch = o_name.split('.')[1] best_epoch_dict[dir_path] = epoch new_eval_fn_list = [] for fn in eval_fn_list: dir_path = Path(fn).parent if dir_path in best_epoch_dict: if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]: new_eval_fn_list.append(fn) eval_fn_list = new_eval_fn_list logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list)) num_pool = min(args.processes, len(eval_fn_list)) p = Pool(num_pool) r_list = p.imap_unordered(process_eval, eval_fn_list) r_list = sorted([(fn, scores) for fn, scores in r_list], key=lambda x: x[0]) rg2_dict = 
{} for fn, scores in r_list: print(fn) if args.perl: print(rouge_results_to_str(scores)) else: rg2_dict[fn] = scores['rouge-2']['f'] print( "ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f'])) with open(fn+".rouge", 'w') as f_out: f_out.write(json.dumps( {'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']})) p.close() p.join() if args.save_best: # find best results group_dict = {} for k, v in rg2_dict.items(): d_name, o_name = Path(k).parent, Path(k).name if (d_name not in group_dict) or (v > group_dict[d_name][1]): group_dict[d_name] = (o_name, v) # compare and save the best result for k, v in group_dict.items(): fn = os.path.join(k, 'save_best.'+args.split) o_name_s, rst_s = v should_save = True if Path(fn).exists(): with open(fn, 'r') as f_in: rst_f = float(f_in.read().strip().split('\n')[-1]) if rst_s <= rst_f: should_save = False if should_save: with open(fn, 'w') as f_out: f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s)) if __name__ == "__main__": main()
data2vec_vision-main
unilm-v1/src/cnndm/eval.py
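Before scoring, the CNN/DM evaluation script above drops short or repeated sentences using get_f1 and remove_duplicate. A small hand-traced illustration of those two helpers, not part of the repository; the values follow from the definitions above, and importing the module pulls in its optional dependencies and parses command-line flags at import time.

from cnndm.eval import get_f1, remove_duplicate

print(round(get_f1("the cat sat", "the cat"), 3))   # token-level F1, roughly 0.8
print(remove_duplicate(
    ["the cat sat on the mat", "the cat sat on a mat", "dogs bark loudly"],
    duplicate_rate=0.7))
# ['the cat sat on the mat', 'dogs bark loudly']  (second sentence overlaps >70% with history)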
from __future__ import print_function, unicode_literals, division import os import re import codecs import platform from subprocess import check_output from tempfile import mkdtemp from functools import partial try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser from pyrouge.utils import log from pyrouge.utils.file_utils import verify_dir REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}", "-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'} def clean(x): return re.sub( r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''", lambda m: REMAP.get(m.group()), x) class DirectoryProcessor: @staticmethod def process(input_dir, output_dir, function): """ Apply function to all files in input_dir and save the resulting ouput files in output_dir. """ if not os.path.exists(output_dir): os.makedirs(output_dir) logger = log.get_global_console_logger() logger.info("Processing files in {}.".format(input_dir)) input_file_names = os.listdir(input_dir) for input_file_name in input_file_names: input_file = os.path.join(input_dir, input_file_name) with codecs.open(input_file, "r", encoding="UTF-8") as f: input_string = f.read() output_string = function(input_string) output_file = os.path.join(output_dir, input_file_name) with codecs.open(output_file, "w", encoding="UTF-8") as f: f.write(clean(output_string.lower())) logger.info("Saved processed files to {}.".format(output_dir)) class Rouge155(object): """ This is a wrapper for the ROUGE 1.5.5 summary evaluation package. This class is designed to simplify the evaluation process by: 1) Converting summaries into a format ROUGE understands. 2) Generating the ROUGE configuration file automatically based on filename patterns. This class can be used within Python like this: rouge = Rouge155() rouge.system_dir = 'test/systems' rouge.model_dir = 'test/models' # The system filename pattern should contain one group that # matches the document ID. rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html' # The model filename pattern has '#ID#' as a placeholder for the # document ID. If there are multiple model summaries, pyrouge # will use the provided regex to automatically match them with # the corresponding system summary. Here, [A-Z] matches # multiple model summaries for a given #ID#. rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate() print(rouge_output) output_dict = rouge.output_to_dict(rouge_ouput) print(output_dict) -> {'rouge_1_f_score': 0.95652, 'rouge_1_f_score_cb': 0.95652, 'rouge_1_f_score_ce': 0.95652, 'rouge_1_precision': 0.95652, [...] To evaluate multiple systems: rouge = Rouge155() rouge.system_dir = '/PATH/TO/systems' rouge.model_dir = 'PATH/TO/models' for system_id in ['id1', 'id2', 'id3']: rouge.system_filename_pattern = \ 'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id) rouge.model_filename_pattern = \ 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate(system_id) print(rouge_output) """ def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None): """ Create a Rouge155 object. rouge_dir: Directory containing Rouge-1.5.5.pl rouge_args: Arguments to pass through to ROUGE if you don't want to use the default pyrouge arguments. 
""" self.temp_dir = temp_dir self.log = log.get_global_console_logger() self.__set_dir_properties() self._config_file = None self._settings_file = self.__get_config_path() self.__set_rouge_dir(rouge_dir) self.args = self.__clean_rouge_args(rouge_args) self._system_filename_pattern = None self._model_filename_pattern = None def save_home_dir(self): config = ConfigParser() section = 'pyrouge settings' config.add_section(section) config.set(section, 'home_dir', self._home_dir) with open(self._settings_file, 'w') as f: config.write(f) self.log.info("Set ROUGE home directory to {}.".format(self._home_dir)) @property def settings_file(self): """ Path of the setttings file, which stores the ROUGE home dir. """ return self._settings_file @property def bin_path(self): """ The full path of the ROUGE binary (although it's technically a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl """ if self._bin_path is None: raise Exception( "ROUGE path not set. Please set the ROUGE home directory " "and ensure that ROUGE-1.5.5.pl exists in it.") return self._bin_path @property def system_filename_pattern(self): """ The regular expression pattern for matching system summary filenames. The regex string. E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. Currently, there is no support for multiple systems. """ return self._system_filename_pattern @system_filename_pattern.setter def system_filename_pattern(self, pattern): self._system_filename_pattern = pattern @property def model_filename_pattern(self): """ The regular expression pattern for matching model summary filenames. The pattern needs to contain the string "#ID#", which is a placeholder for the document ID. E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. "#ID#" is a placeholder for the document ID which has been matched by the "(\d+)" part of the system filename pattern. The different model summaries for a given document ID are matched by the "[A-Z]" part. """ return self._model_filename_pattern @model_filename_pattern.setter def model_filename_pattern(self, pattern): self._model_filename_pattern = pattern @property def config_file(self): return self._config_file @config_file.setter def config_file(self, path): config_dir, _ = os.path.split(path) verify_dir(config_dir, "configuration file") self._config_file = path def split_sentences(self): """ ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used. """ from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter() def sent_split_to_string(s): return "\n".join(ss.split(s)) process_func = partial( DirectoryProcessor.process, function=sent_split_to_string) self.__process_summaries(process_func) @staticmethod def convert_summaries_to_rouge_format(input_dir, output_dir): """ Convert all files in input_dir into a format ROUGE understands and saves the files to output_dir. The input files are assumed to be plain text with one sentence per line. input_dir: Path of directory containing the input files. output_dir: Path of directory in which the converted files will be saved. 
""" DirectoryProcessor.process( input_dir, output_dir, Rouge155.convert_text_to_rouge_format) @staticmethod def convert_text_to_rouge_format(text, title="dummy title"): """ Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line. text: The text to convert, containg one sentence per line. title: Optional title for the text. The title will appear in the converted file, but doesn't seem to have any other relevance. Returns: The converted text as string. """ sentences = text.split("\n") sent_elems = [ "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>" "{text}</a>".format(i=i, text=sent) for i, sent in enumerate(sentences, start=1)] html = """<html> <head> <title>{title}</title> </head> <body bgcolor="white"> {elems} </body> </html>""".format(title=title, elems="\n".join(sent_elems)) return html @staticmethod def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ system_filenames = [f for f in os.listdir(system_dir)] system_models_tuples = [] system_filename_pattern = re.compile(system_filename_pattern) for system_filename in sorted(system_filenames): match = system_filename_pattern.match(system_filename) if match: id = match.groups(0)[0] model_filenames = [model_filename_pattern.replace('#ID#', id)] # model_filenames = Rouge155.__get_model_filenames_for_id( # id, model_dir, model_filename_pattern) system_models_tuples.append( (system_filename, sorted(model_filenames))) if not system_models_tuples: raise Exception( "Did not find any files matching the pattern {} " "in the system summaries directory {}.".format( system_filename_pattern.pattern, system_dir)) with codecs.open(config_file_path, 'w', encoding='utf-8') as f: f.write('<ROUGE-EVAL version="1.55">') for task_id, (system_filename, model_filenames) in enumerate( system_models_tuples, start=1): eval_string = Rouge155.__get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames) f.write(eval_string) f.write("</ROUGE-EVAL>") def write_config(self, config_file_path=None, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. 
""" if not system_id: system_id = 1 if (not config_file_path) or (not self._config_dir): self._config_dir = mkdtemp(dir=self.temp_dir) config_filename = "rouge_conf.xml" else: config_dir, config_filename = os.path.split(config_file_path) verify_dir(config_dir, "configuration file") self._config_file = os.path.join(self._config_dir, config_filename) Rouge155.write_config_static( self._system_dir, self._system_filename_pattern, self._model_dir, self._model_filename_pattern, self._config_file, system_id) self.log.info( "Written ROUGE configuration to {}".format(self._config_file)) def evaluate(self, system_id=1, rouge_args=None): """ Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string. """ self.write_config(system_id=system_id) options = self.__get_options(rouge_args) command = [self._bin_path] + options self.log.info( "Running ROUGE with command {}".format(" ".join(command))) rouge_output = check_output(command).decode("UTF-8") return rouge_output def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None): """ Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string. """ if split_sentences: self.split_sentences() self.__write_summaries() rouge_output = self.evaluate(system_id, rouge_args) return rouge_output def output_to_dict(self, output): """ Convert the ROUGE output into python dictionary for further processing. """ # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) " r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)") results = {} for line in output.split("\n"): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace("-", '_') key = "{}_{}".format(rouge_type, measure) results[key] = float(result) results["{}_cb".format(key)] = float(conf_begin) results["{}_ce".format(key)] = float(conf_end) return results ################################################################### # Private methods def __set_rouge_dir(self, home_dir=None): """ Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths. """ if not home_dir: self._home_dir = self.__get_rouge_home_dir_from_settings() else: self._home_dir = home_dir self.save_home_dir() self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl') self.data_dir = os.path.join(self._home_dir, 'data') if not os.path.exists(self._bin_path): raise Exception( "ROUGE binary not found at {}. 
Please set the " "correct path by running pyrouge_set_rouge_path " "/path/to/rouge/home.".format(self._bin_path)) def __get_rouge_home_dir_from_settings(self): config = ConfigParser() with open(self._settings_file) as f: if hasattr(config, "read_file"): config.read_file(f) else: # use deprecated python 2.x method config.readfp(f) rouge_home_dir = config.get('pyrouge settings', 'home_dir') return rouge_home_dir @staticmethod def __get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames): """ ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries. """ peer_elems = "<P ID=\"{id}\">{name}</P>".format( id=system_id, name=system_filename) model_elems = ["<M ID=\"{id}\">{name}</M>".format( id=chr(65 + i), name=name) for i, name in enumerate(model_filenames)] model_elems = "\n\t\t\t".join(model_elems) eval_string = """ <EVAL ID="{task_id}"> <MODEL-ROOT>{model_root}</MODEL-ROOT> <PEER-ROOT>{peer_root}</PEER-ROOT> <INPUT-FORMAT TYPE="SEE"> </INPUT-FORMAT> <PEERS> {peer_elems} </PEERS> <MODELS> {model_elems} </MODELS> </EVAL> """.format( task_id=task_id, model_root=model_dir, model_elems=model_elems, peer_root=system_dir, peer_elems=peer_elems) return eval_string def __process_summaries(self, process_func): """ Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders. """ temp_dir = mkdtemp(dir=self.temp_dir) new_system_dir = os.path.join(temp_dir, "system") os.mkdir(new_system_dir) new_model_dir = os.path.join(temp_dir, "model") os.mkdir(new_model_dir) self.log.info( "Processing summaries. Saving system files to {} and " "model files to {}.".format(new_system_dir, new_model_dir)) process_func(self._system_dir, new_system_dir) process_func(self._model_dir, new_model_dir) self._system_dir = new_system_dir self._model_dir = new_model_dir def __write_summaries(self): self.log.info("Writing summaries.") self.__process_summaries(self.convert_summaries_to_rouge_format) @staticmethod def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern): pattern = re.compile(model_filenames_pattern.replace('#ID#', id)) model_filenames = [ f for f in os.listdir(model_dir) if pattern.match(f)] if not model_filenames: raise Exception( "Could not find any model summaries for the system" " summary with ID {}. Specified model filename pattern was: " "{}".format(id, model_filenames_pattern)) return model_filenames def __get_options(self, rouge_args=None): """ Get supplied command line arguments for ROUGE or use default ones. """ if self.args: options = self.args.split() elif rouge_args: options = rouge_args.split() else: options = [ '-e', self._data_dir, '-c', 95, # '-2', # '-1', # '-U', '-m', # '-v', '-r', 1000, '-n', 2, # '-w', 1.2, '-a', ] options = list(map(str, options)) options = self.__add_config_option(options) return options def __create_dir_property(self, dir_name, docstring): """ Generate getter and setter for a directory property. 
""" property_name = "{}_dir".format(dir_name) private_name = "_" + property_name setattr(self, private_name, None) def fget(self): return getattr(self, private_name) def fset(self, path): verify_dir(path, dir_name) setattr(self, private_name, path) p = property(fget=fget, fset=fset, doc=docstring) setattr(self.__class__, property_name, p) def __set_dir_properties(self): """ Automatically generate the properties for directories. """ directories = [ ("home", "The ROUGE home directory."), ("data", "The path of the ROUGE 'data' directory."), ("system", "Path of the directory containing system summaries."), ("model", "Path of the directory containing model summaries."), ] for (dirname, docstring) in directories: self.__create_dir_property(dirname, docstring) def __clean_rouge_args(self, rouge_args): """ Remove enclosing quotation marks, if any. """ if not rouge_args: return quot_mark_pattern = re.compile('"(.+)"') match = quot_mark_pattern.match(rouge_args) if match: cleaned_args = match.group(1) return cleaned_args else: return rouge_args def __add_config_option(self, options): return options + [self._config_file] def __get_config_path(self): if platform.system() == "Windows": parent_dir = os.getenv("APPDATA") config_dir_name = "pyrouge" elif os.name == "posix": parent_dir = os.path.expanduser("~") config_dir_name = ".pyrouge" else: parent_dir = os.path.dirname(__file__) config_dir_name = "" config_dir = os.path.join(parent_dir, config_dir_name) if not os.path.exists(config_dir): os.makedirs(config_dir) return os.path.join(config_dir, 'settings.ini') if __name__ == "__main__": import argparse from utils.argparsers import rouge_path_parser parser = argparse.ArgumentParser(parents=[rouge_path_parser]) args = parser.parse_args() rouge = Rouge155(args.rouge_home) rouge.save_home_dir()
data2vec_vision-main
unilm-v1/src/cnndm/bs_pyrouge.py
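Rouge155.convert_text_to_rouge_format is a static helper, so it can be exercised without a working ROUGE installation: it wraps one-sentence-per-line text in the SEE-style HTML that ROUGE-1.5.5 consumes. A minimal sketch, not part of the repository; it assumes the pyrouge package is installed, since the module imports it.

from cnndm.bs_pyrouge import Rouge155

html = Rouge155.convert_text_to_rouge_format(
    "police arrest suspect .\ncharges filed on monday .", title="example")
print(html)
# Each input line becomes a numbered '<a name="i">[i]</a> <a href="#i" id=i>sentence</a>'
# element inside a small <html>/<body> document titled "example".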
from random import randint, shuffle from random import random as rand import numpy as np import torch import torch.utils.data def get_random_word(vocab_words): i = randint(0, len(vocab_words)-1) return vocab_words[i] def batch_list_to_batch_tensors(batch): batch_tensors = [] for x in zip(*batch): if x[0] is None: batch_tensors.append(None) elif isinstance(x[0], torch.Tensor): batch_tensors.append(torch.stack(x)) else: batch_tensors.append(torch.tensor(x, dtype=torch.long)) return batch_tensors class TrieNode(object): def __init__(self): self.children = {} self.is_leaf = False def try_get_children(self, key): if key not in self.children: self.children[key] = TrieNode() return self.children[key] class TrieTree(object): def __init__(self): self.root = TrieNode() def add(self, tokens): r = self.root for token in tokens: r = r.try_get_children(token) r.is_leaf = True def get_pieces(self, tokens, offset): pieces = [] r = self.root token_id = 0 last_valid = 0 match_count = 0 while last_valid < len(tokens): if token_id < len(tokens) and tokens[token_id] in r.children: r = r.children[tokens[token_id]] match_count += 1 if r.is_leaf: last_valid = token_id token_id += 1 else: pieces.append( list(range(token_id - match_count + offset, last_valid + 1 + offset))) last_valid += 1 token_id = last_valid r = self.root match_count = 0 return pieces def _get_word_split_index(tokens, st, end): split_idx = [] i = st while i < end: if (not tokens[i].startswith('##')) or (i == st): split_idx.append(i) i += 1 split_idx.append(end) return split_idx def _expand_whole_word(tokens, st, end): new_st, new_end = st, end while (new_st >= 0) and tokens[new_st].startswith('##'): new_st -= 1 while (new_end < len(tokens)) and tokens[new_end].startswith('##'): new_end += 1 return new_st, new_end class Pipeline(): """ Pre-process Pipeline Class : callable """ def __init__(self): super().__init__() self.skipgram_prb = None self.skipgram_size = None self.pre_whole_word = None self.mask_whole_word = None self.word_subsample_prb = None self.sp_prob = None self.pieces_dir = None self.vocab_words = None self.pieces_threshold = 10 self.trie = None self.call_count = 0 self.offline_mode = False self.skipgram_size_geo_list = None self.span_same_mask = False def init_skipgram_size_geo_list(self, p): if p > 0: g_list = [] t = p for _ in range(self.skipgram_size): g_list.append(t) t *= (1-p) s = sum(g_list) self.skipgram_size_geo_list = [x/s for x in g_list] def create_trie_tree(self, pieces_dir): print("sp_prob = {}".format(self.sp_prob)) print("pieces_threshold = {}".format(self.pieces_threshold)) if pieces_dir is not None: self.trie = TrieTree() pieces_files = [pieces_dir] for token in self.vocab_words: self.trie.add([token]) for piece_file in pieces_files: print("Load piece file: {}".format(piece_file)) with open(piece_file, mode='r', encoding='utf-8') as reader: for line in reader: parts = line.split('\t') if int(parts[-1]) < self.pieces_threshold: pass tokens = [] for part in parts[:-1]: tokens.extend(part.split(' ')) self.trie.add(tokens) def __call__(self, instance): raise NotImplementedError # pre_whole_word: tokenize to words before masking # post whole word (--mask_whole_word): expand to words after masking def get_masked_pos(self, tokens, n_pred, add_skipgram=False, mask_segment=None, protect_range=None): if self.pieces_dir is not None and self.trie is None: self.create_trie_tree(self.pieces_dir) if self.pre_whole_word: if self.trie is not None: pieces = self.trie.get_pieces(tokens, 0) new_pieces = [] for piece in pieces: if 
len(new_pieces) > 0 and tokens[piece[0]].startswith("##"): new_pieces[-1].extend(piece) else: new_pieces.append(piece) del pieces pieces = new_pieces pre_word_split = list(_[-1] for _ in pieces) pre_word_split.append(len(tokens)) else: pre_word_split = _get_word_split_index(tokens, 0, len(tokens)) index2piece = None else: pre_word_split = list(range(0, len(tokens)+1)) if self.trie is not None: pieces = self.trie.get_pieces(tokens, 0) index2piece = {} for piece in pieces: for index in piece: index2piece[index] = (piece[0], piece[-1]) else: index2piece = None span_list = list(zip(pre_word_split[:-1], pre_word_split[1:])) # candidate positions of masked tokens cand_pos = [] special_pos = set() if mask_segment: for i, sp in enumerate(span_list): sp_st, sp_end = sp if (sp_end-sp_st == 1) and tokens[sp_st].endswith('SEP]'): segment_index = i break for i, sp in enumerate(span_list): sp_st, sp_end = sp if (sp_end-sp_st == 1) and (tokens[sp_st].endswith('CLS]') or tokens[sp_st].endswith('SEP]')): special_pos.add(i) else: if mask_segment: if ((i < segment_index) and ('a' in mask_segment)) or ((i > segment_index) and ('b' in mask_segment)): cand_pos.append(i) else: cand_pos.append(i) shuffle(cand_pos) masked_pos = set() for i_span in cand_pos: if len(masked_pos) >= n_pred: break cand_st, cand_end = span_list[i_span] if len(masked_pos)+cand_end-cand_st > n_pred: continue if any(p in masked_pos for p in range(cand_st, cand_end)): continue n_span = 1 if index2piece is not None: p_start, p_end = index2piece[i_span] if p_start < p_end and (rand() < self.sp_prob): # n_span = p_end - p_start + 1 st_span, end_span = p_start, p_end + 1 else: st_span, end_span = i_span, i_span + 1 else: rand_skipgram_size = 0 # ngram if self.skipgram_size_geo_list: # sampling ngram size from geometric distribution rand_skipgram_size = np.random.choice( len(self.skipgram_size_geo_list), 1, p=self.skipgram_size_geo_list)[0] + 1 else: if add_skipgram and (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb): rand_skipgram_size = min( randint(2, self.skipgram_size), len(span_list)-i_span) for n in range(2, rand_skipgram_size+1): tail_st, tail_end = span_list[i_span+n-1] if (tail_end-tail_st == 1) and (tail_st in special_pos): break if len(masked_pos)+tail_end-cand_st > n_pred: break n_span = n st_span, end_span = i_span, i_span + n_span if self.mask_whole_word: # pre_whole_word==False: position index of span_list is the same as tokens st_span, end_span = _expand_whole_word( tokens, st_span, end_span) # subsampling according to frequency if self.word_subsample_prb: skip_pos = set() if self.pre_whole_word: w_span_list = span_list[st_span:end_span] else: split_idx = _get_word_split_index( tokens, st_span, end_span) w_span_list = list( zip(split_idx[:-1], split_idx[1:])) for i, sp in enumerate(w_span_list): sp_st, sp_end = sp if sp_end-sp_st == 1: w_cat = tokens[sp_st] else: w_cat = ''.join(tokens[sp_st:sp_end]) if (w_cat in self.word_subsample_prb) and (rand() < self.word_subsample_prb[w_cat]): for k in range(sp_st, sp_end): skip_pos.add(k) else: skip_pos = None for sp in range(st_span, end_span): for mp in range(span_list[sp][0], span_list[sp][1]): if not(skip_pos and (mp in skip_pos)) and (mp not in special_pos) and not(protect_range and (protect_range[0] <= mp < protect_range[1])): masked_pos.add(mp) if len(masked_pos) < n_pred: shuffle(cand_pos) for pos in cand_pos: if len(masked_pos) >= n_pred: break if pos not in masked_pos: masked_pos.add(pos) masked_pos = list(masked_pos) if len(masked_pos) > 
n_pred: # shuffle(masked_pos) masked_pos = masked_pos[:n_pred] return masked_pos def replace_masked_tokens(self, tokens, masked_pos): if self.span_same_mask: masked_pos = sorted(list(masked_pos)) prev_pos, prev_rand = None, None for pos in masked_pos: if self.span_same_mask and (pos-1 == prev_pos): t_rand = prev_rand else: t_rand = rand() if t_rand < 0.8: # 80% tokens[pos] = '[MASK]' elif t_rand < 0.9: # 10% tokens[pos] = get_random_word(self.vocab_words) prev_pos, prev_rand = pos, t_rand
data2vec_vision-main
unilm-v1/src/biunilm/loader_utils.py
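TrieTree above is what Pipeline.create_trie_tree builds so that frequent multi-token pieces can be masked as whole spans. A small hand-traced sketch of get_pieces, not part of the repository; the expected output is traced by hand from the implementation above, and the import path assumes unilm-v1/src is importable.

from biunilm.loader_utils import TrieTree

trie = TrieTree()
for tok in ["the", "new", "york", "times", "says"]:
    trie.add([tok])                 # every vocab token is a valid one-token piece
trie.add(["new", "york"])           # frequent multi-token pieces
trie.add(["new", "york", "times"])

print(trie.get_pieces(["the", "new", "york", "times", "says"], offset=0))
# [[0], [1, 2, 3], [4]]  -- "new york times" stays together as one piece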
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import argparse import math from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler import random import pickle from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer from pytorch_pretrained_bert.modeling import BertForSeq2SeqDecoder from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear from nn.data_parallel import DataParallelImbalance import biunilm.seq2seq_loader as seq2seq_loader logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def ascii_print(text): text = text.encode("ascii", "ignore") print(text) def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--model_recover_path", default=None, type=str, help="The file of fine-tuned pretraining model.") parser.add_argument("--max_seq_length", default=512, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument('--ffn_type', default=0, type=int, help="0: default mlp; 1: W((Wx+b) elem_prod x);") parser.add_argument('--num_qkv', default=0, type=int, help="Number of different <Q,K,V>.") parser.add_argument('--seg_emb', action='store_true', help="Using segment embedding for self-attention.") # decoding parameters parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--amp', action='store_true', help="Whether to use amp for fp16") parser.add_argument("--input_file", type=str, help="Input file") parser.add_argument('--subset', type=int, default=0, help="Decode a subset of the input dataset.") parser.add_argument("--output_file", type=str, help="output file") parser.add_argument("--split", type=str, default="", help="Data split (train/val/test).") parser.add_argument('--tokenized_input', action='store_true', help="Whether the input is tokenized.") parser.add_argument('--seed', type=int, default=123, help="random seed for initialization") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument('--new_segment_ids', action='store_true', help="Use new segment ids for bi-uni-directional LM.") parser.add_argument('--new_pos_ids', action='store_true', help="Use new position ids for LMs.") parser.add_argument('--batch_size', type=int, default=4, help="Batch size for decoding.") parser.add_argument('--beam_size', type=int, default=1, help="Beam size for searching") parser.add_argument('--length_penalty', type=float, default=0, help="Length penalty for beam search") parser.add_argument('--forbid_duplicate_ngrams', action='store_true') 
parser.add_argument('--forbid_ignore_word', type=str, default=None, help="Ignore the word during forbid_duplicate_ngrams") parser.add_argument("--min_len", default=None, type=int) parser.add_argument('--need_score_traces', action='store_true') parser.add_argument('--ngram_size', type=int, default=3) parser.add_argument('--mode', default="s2s", choices=["s2s", "l2r", "both"]) parser.add_argument('--max_tgt_length', type=int, default=128, help="maximum length of target sequence") parser.add_argument('--s2s_special_token', action='store_true', help="New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.") parser.add_argument('--s2s_add_segment', action='store_true', help="Additional segmental for the encoder of S2S.") parser.add_argument('--s2s_share_segment', action='store_true', help="Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).") parser.add_argument('--pos_shift', action='store_true', help="Using position shift for fine-tuning.") parser.add_argument('--not_predict_token', type=str, default=None, help="Do not predict the tokens during decoding.") args = parser.parse_args() if args.need_score_traces and args.beam_size <= 1: raise ValueError( "Score trace is only available for beam search with beam size > 1.") if args.max_tgt_length >= args.max_seq_length - 2: raise ValueError("Maximum tgt length exceeds max seq length - 2.") device = torch.device( "cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) tokenizer = BertTokenizer.from_pretrained( args.bert_model, do_lower_case=args.do_lower_case) tokenizer.max_len = args.max_seq_length pair_num_relation = 0 bi_uni_pipeline = [] bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqDecoder(list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length, max_tgt_length=args.max_tgt_length, new_segment_ids=args.new_segment_ids, mode="s2s", num_qkv=args.num_qkv, s2s_special_token=args.s2s_special_token, s2s_add_segment=args.s2s_add_segment, s2s_share_segment=args.s2s_share_segment, pos_shift=args.pos_shift)) amp_handle = None if args.fp16 and args.amp: from apex import amp amp_handle = amp.init(enable_caching=True) logger.info("enable fp16 with amp") # Prepare model cls_num_labels = 2 type_vocab_size = 6 + \ (1 if args.s2s_add_segment else 0) if args.new_segment_ids else 2 mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids( ["[MASK]", "[SEP]", "[S2S_SOS]"]) def _get_token_id_set(s): r = None if s: w_list = [] for w in s.split('|'): if w.startswith('[') and w.endswith(']'): w_list.append(w.upper()) else: w_list.append(w) r = set(tokenizer.convert_tokens_to_ids(w_list)) return r forbid_ignore_set = _get_token_id_set(args.forbid_ignore_word) not_predict_set = _get_token_id_set(args.not_predict_token) print(args.model_recover_path) for model_recover_path in glob.glob(args.model_recover_path.strip()): logger.info("***** Recover model: %s *****", model_recover_path) model_recover = torch.load(model_recover_path) model = BertForSeq2SeqDecoder.from_pretrained(args.bert_model, state_dict=model_recover, num_labels=cls_num_labels, num_rel=pair_num_relation, type_vocab_size=type_vocab_size, task_idx=3, mask_word_id=mask_word_id, search_beam_size=args.beam_size, length_penalty=args.length_penalty, eos_id=eos_word_ids, sos_id=sos_word_id, forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set, 
not_predict_set=not_predict_set, ngram_size=args.ngram_size, min_len=args.min_len, mode=args.mode, max_position_embeddings=args.max_seq_length, ffn_type=args.ffn_type, num_qkv=args.num_qkv, seg_emb=args.seg_emb, pos_shift=args.pos_shift) del model_recover if args.fp16: model.half() model.to(device) if n_gpu > 1: model = torch.nn.DataParallel(model) torch.cuda.empty_cache() model.eval() next_i = 0 max_src_length = args.max_seq_length - 2 - args.max_tgt_length with open(args.input_file, encoding="utf-8") as fin: input_lines = [x.strip() for x in fin.readlines()] if args.subset > 0: logger.info("Decoding subset: %d", args.subset) input_lines = input_lines[:args.subset] data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer input_lines = [data_tokenizer.tokenize( x)[:max_src_length] for x in input_lines] input_lines = sorted(list(enumerate(input_lines)), key=lambda x: -len(x[1])) output_lines = [""] * len(input_lines) score_trace_list = [None] * len(input_lines) total_batch = math.ceil(len(input_lines) / args.batch_size) with tqdm(total=total_batch) as pbar: while next_i < len(input_lines): _chunk = input_lines[next_i:next_i + args.batch_size] buf_id = [x[0] for x in _chunk] buf = [x[1] for x in _chunk] next_i += args.batch_size max_a_len = max([len(x) for x in buf]) instances = [] for instance in [(x, max_a_len) for x in buf]: for proc in bi_uni_pipeline: instances.append(proc(instance)) with torch.no_grad(): batch = seq2seq_loader.batch_list_to_batch_tensors( instances) batch = [ t.to(device) if t is not None else None for t in batch] input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx = batch traces = model(input_ids, token_type_ids, position_ids, input_mask, task_idx=task_idx, mask_qkv=mask_qkv) if args.beam_size > 1: traces = {k: v.tolist() for k, v in traces.items()} output_ids = traces['pred_seq'] else: output_ids = traces.tolist() for i in range(len(buf)): w_ids = output_ids[i] output_buf = tokenizer.convert_ids_to_tokens(w_ids) output_tokens = [] for t in output_buf: if t in ("[SEP]", "[PAD]"): break output_tokens.append(t) output_sequence = ' '.join(detokenize(output_tokens)) output_lines[buf_id[i]] = output_sequence if args.need_score_traces: score_trace_list[buf_id[i]] = { 'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]} pbar.update(1) if args.output_file: fn_out = args.output_file else: fn_out = model_recover_path+'.'+args.split with open(fn_out, "w", encoding="utf-8") as fout: for l in output_lines: fout.write(l) fout.write("\n") if args.need_score_traces: with open(fn_out + ".trace.pickle", "wb") as fout_trace: pickle.dump( {"version": 0.0, "num_samples": len(input_lines)}, fout_trace) for x in score_trace_list: pickle.dump(x, fout_trace) if __name__ == "__main__": main()
data2vec_vision-main
unilm-v1/src/biunilm/decode_seq2seq.py
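# Illustrative sketch: decode_seq2seq.py sorts inputs by descending length,
# decodes them in batches, and writes each hypothesis back to its original
# position via the saved index (buf_id). The standalone example below
# reproduces only that bookkeeping with a dummy fake_decode() stand-in (a name
# invented for this sketch) so the reordering logic can be checked in isolation.
def fake_decode(batch_of_token_lists):
    # stand-in for the model call: just echoes the source tokens reversed
    return [list(reversed(toks)) for toks in batch_of_token_lists]

def decode_in_length_sorted_batches(input_lines, batch_size=2):
    # keep the original index next to every example, then sort by -length
    indexed = sorted(enumerate(input_lines), key=lambda x: -len(x[1]))
    output_lines = [""] * len(input_lines)
    next_i = 0
    while next_i < len(indexed):
        chunk = indexed[next_i:next_i + batch_size]
        next_i += batch_size
        buf_id = [x[0] for x in chunk]
        buf = [x[1] for x in chunk]
        for original_pos, hyp in zip(buf_id, fake_decode(buf)):
            output_lines[original_pos] = " ".join(hyp)
    return output_lines

print(decode_in_length_sorted_batches([["a", "b", "c"], ["x"], ["p", "q"]]))
# -> ['c b a', 'x', 'q p']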
data2vec_vision-main
unilm-v1/src/biunilm/__init__.py
from random import randint, shuffle, choice from random import random as rand import math import torch from biunilm.loader_utils import get_random_word, batch_list_to_batch_tensors, Pipeline # Input file format : # 1. One sentence per line. These should ideally be actual sentences, # not entire paragraphs or arbitrary spans of text. (Because we use # the sentence boundaries for the "next sentence prediction" task). # 2. Blank lines between documents. Document boundaries are needed # so that the "next sentence prediction" task doesn't span between documents. def truncate_tokens_pair(tokens_a, tokens_b, max_len, max_len_a=0, max_len_b=0, trunc_seg=None, always_truncate_tail=False): num_truncated_a = [0, 0] num_truncated_b = [0, 0] while True: if len(tokens_a) + len(tokens_b) <= max_len: break if (max_len_a > 0) and len(tokens_a) > max_len_a: trunc_tokens = tokens_a num_truncated = num_truncated_a elif (max_len_b > 0) and len(tokens_b) > max_len_b: trunc_tokens = tokens_b num_truncated = num_truncated_b elif trunc_seg: # truncate the specified segment if trunc_seg == 'a': trunc_tokens = tokens_a num_truncated = num_truncated_a else: trunc_tokens = tokens_b num_truncated = num_truncated_b else: # truncate the longer segment if len(tokens_a) > len(tokens_b): trunc_tokens = tokens_a num_truncated = num_truncated_a else: trunc_tokens = tokens_b num_truncated = num_truncated_b # whether always truncate source sequences if (not always_truncate_tail) and (rand() < 0.5): del trunc_tokens[0] num_truncated[0] += 1 else: trunc_tokens.pop() num_truncated[1] += 1 return num_truncated_a, num_truncated_b class Seq2SeqDataset(torch.utils.data.Dataset): """ Load sentence pair (sequential or random order) from corpus """ def __init__(self, file_src, file_tgt, batch_size, tokenizer, max_len, file_oracle=None, short_sampling_prob=0.1, sent_reverse_order=False, bi_uni_pipeline=[]): super().__init__() self.tokenizer = tokenizer # tokenize function self.max_len = max_len # maximum length of tokens self.short_sampling_prob = short_sampling_prob self.bi_uni_pipeline = bi_uni_pipeline self.batch_size = batch_size self.sent_reverse_order = sent_reverse_order # read the file into memory self.ex_list = [] if file_oracle is None: with open(file_src, "r", encoding='utf-8') as f_src, open(file_tgt, "r", encoding='utf-8') as f_tgt: for src, tgt in zip(f_src, f_tgt): src_tk = tokenizer.tokenize(src.strip()) tgt_tk = tokenizer.tokenize(tgt.strip()) assert len(src_tk) > 0 assert len(tgt_tk) > 0 self.ex_list.append((src_tk, tgt_tk)) else: with open(file_src, "r", encoding='utf-8') as f_src, \ open(file_tgt, "r", encoding='utf-8') as f_tgt, \ open(file_oracle, "r", encoding='utf-8') as f_orc: for src, tgt, orc in zip(f_src, f_tgt, f_orc): src_tk = tokenizer.tokenize(src.strip()) tgt_tk = tokenizer.tokenize(tgt.strip()) s_st, labl = orc.split('\t') s_st = [int(x) for x in s_st.split()] labl = [int(x) for x in labl.split()] self.ex_list.append((src_tk, tgt_tk, s_st, labl)) print('Load {0} documents'.format(len(self.ex_list))) def __len__(self): return len(self.ex_list) def __getitem__(self, idx): instance = self.ex_list[idx] proc = choice(self.bi_uni_pipeline) instance = proc(instance) return instance def __iter__(self): # iterator to load data for __ in range(math.ceil(len(self.ex_list) / float(self.batch_size))): batch = [] for __ in range(self.batch_size): idx = randint(0, len(self.ex_list)-1) batch.append(self.__getitem__(idx)) # To Tensor yield batch_list_to_batch_tensors(batch) class Preprocess4Seq2seq(Pipeline): """ 
Pre-processing steps for pretraining transformer """ def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, block_mask=False, mask_whole_word=False, new_segment_ids=False, truncate_config={}, mask_source_words=False, mode="s2s", has_oracle=False, num_qkv=0, s2s_special_token=False, s2s_add_segment=False, s2s_share_segment=False, pos_shift=False): super().__init__() self.max_len = max_len self.max_pred = max_pred # max tokens of prediction self.mask_prob = mask_prob # masking probability self.vocab_words = vocab_words # vocabulary (sub)words self.indexer = indexer # function from token to token index self.max_len = max_len self._tril_matrix = torch.tril(torch.ones( (max_len, max_len), dtype=torch.long)) self.skipgram_prb = skipgram_prb self.skipgram_size = skipgram_size self.mask_whole_word = mask_whole_word self.new_segment_ids = new_segment_ids self.always_truncate_tail = truncate_config.get( 'always_truncate_tail', False) self.max_len_a = truncate_config.get('max_len_a', None) self.max_len_b = truncate_config.get('max_len_b', None) self.trunc_seg = truncate_config.get('trunc_seg', None) self.task_idx = 3 # relax projection layer for different tasks self.mask_source_words = mask_source_words assert mode in ("s2s", "l2r") self.mode = mode self.has_oracle = has_oracle self.num_qkv = num_qkv self.s2s_special_token = s2s_special_token self.s2s_add_segment = s2s_add_segment self.s2s_share_segment = s2s_share_segment self.pos_shift = pos_shift def __call__(self, instance): tokens_a, tokens_b = instance[:2] if self.pos_shift: tokens_b = ['[S2S_SOS]'] + tokens_b # -3 for special tokens [CLS], [SEP], [SEP] num_truncated_a, _ = truncate_tokens_pair(tokens_a, tokens_b, self.max_len - 3, max_len_a=self.max_len_a, max_len_b=self.max_len_b, trunc_seg=self.trunc_seg, always_truncate_tail=self.always_truncate_tail) # Add Special Tokens if self.s2s_special_token: tokens = ['[S2S_CLS]'] + tokens_a + \ ['[S2S_SEP]'] + tokens_b + ['[SEP]'] else: tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]'] if self.new_segment_ids: if self.mode == "s2s": if self.s2s_add_segment: if self.s2s_share_segment: segment_ids = [0] + [1] * \ (len(tokens_a)+1) + [5]*(len(tokens_b)+1) else: segment_ids = [4] + [6] * \ (len(tokens_a)+1) + [5]*(len(tokens_b)+1) else: segment_ids = [4] * (len(tokens_a)+2) + \ [5]*(len(tokens_b)+1) else: segment_ids = [2] * (len(tokens)) else: segment_ids = [0]*(len(tokens_a)+2) + [1]*(len(tokens_b)+1) if self.pos_shift: n_pred = min(self.max_pred, len(tokens_b)) masked_pos = [len(tokens_a)+2+i for i in range(len(tokens_b))] masked_weights = [1]*n_pred masked_ids = self.indexer(tokens_b[1:]+['[SEP]']) else: # For masked Language Models # the number of prediction is sometimes less than max_pred when sequence is short effective_length = len(tokens_b) if self.mask_source_words: effective_length += len(tokens_a) n_pred = min(self.max_pred, max( 1, int(round(effective_length*self.mask_prob)))) # candidate positions of masked tokens cand_pos = [] special_pos = set() for i, tk in enumerate(tokens): # only mask tokens_b (target sequence) # we will mask [SEP] as an ending symbol if (i >= len(tokens_a)+2) and (tk != '[CLS]'): cand_pos.append(i) elif self.mask_source_words and (i < len(tokens_a)+2) and (tk != '[CLS]') and (not tk.startswith('[SEP')): cand_pos.append(i) else: special_pos.add(i) shuffle(cand_pos) masked_pos = set() max_cand_pos = max(cand_pos) for pos in cand_pos: if len(masked_pos) >= n_pred: break if pos in masked_pos: continue 
def _expand_whole_word(st, end): new_st, new_end = st, end while (new_st >= 0) and tokens[new_st].startswith('##'): new_st -= 1 while (new_end < len(tokens)) and tokens[new_end].startswith('##'): new_end += 1 return new_st, new_end if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb): # ngram cur_skipgram_size = randint(2, self.skipgram_size) if self.mask_whole_word: st_pos, end_pos = _expand_whole_word( pos, pos + cur_skipgram_size) else: st_pos, end_pos = pos, pos + cur_skipgram_size else: # directly mask if self.mask_whole_word: st_pos, end_pos = _expand_whole_word(pos, pos + 1) else: st_pos, end_pos = pos, pos + 1 for mp in range(st_pos, end_pos): if (0 < mp <= max_cand_pos) and (mp not in special_pos): masked_pos.add(mp) else: break masked_pos = list(masked_pos) if len(masked_pos) > n_pred: shuffle(masked_pos) masked_pos = masked_pos[:n_pred] masked_tokens = [tokens[pos] for pos in masked_pos] for pos in masked_pos: if rand() < 0.8: # 80% tokens[pos] = '[MASK]' elif rand() < 0.5: # 10% tokens[pos] = get_random_word(self.vocab_words) # when n_pred < max_pred, we only calculate loss within n_pred masked_weights = [1]*len(masked_tokens) # Token Indexing masked_ids = self.indexer(masked_tokens) # Token Indexing input_ids = self.indexer(tokens) # Zero Padding n_pad = self.max_len - len(input_ids) input_ids.extend([0]*n_pad) segment_ids.extend([0]*n_pad) if self.num_qkv > 1: mask_qkv = [0]*(len(tokens_a)+2) + [1] * (len(tokens_b)+1) mask_qkv.extend([0]*n_pad) else: mask_qkv = None input_mask = torch.zeros(self.max_len, self.max_len, dtype=torch.long) if self.mode == "s2s": input_mask[:, :len(tokens_a)+2].fill_(1) second_st, second_end = len( tokens_a)+2, len(tokens_a)+len(tokens_b)+3 input_mask[second_st:second_end, second_st:second_end].copy_( self._tril_matrix[:second_end-second_st, :second_end-second_st]) else: st, end = 0, len(tokens_a) + len(tokens_b) + 3 input_mask[st:end, st:end].copy_(self._tril_matrix[:end, :end]) # Zero Padding for masked target if self.max_pred > n_pred: n_pad = self.max_pred - n_pred if masked_ids is not None: masked_ids.extend([0]*n_pad) if masked_pos is not None: masked_pos.extend([0]*n_pad) if masked_weights is not None: masked_weights.extend([0]*n_pad) oracle_pos = None oracle_weights = None oracle_labels = None if self.has_oracle: s_st, labls = instance[2:] oracle_pos = [] oracle_labels = [] for st, lb in zip(s_st, labls): st = st - num_truncated_a[0] if st > 0 and st < len(tokens_a): oracle_pos.append(st) oracle_labels.append(lb) oracle_pos = oracle_pos[:20] oracle_labels = oracle_labels[:20] oracle_weights = [1] * len(oracle_pos) if len(oracle_pos) < 20: x_pad = 20 - len(oracle_pos) oracle_pos.extend([0] * x_pad) oracle_labels.extend([0] * x_pad) oracle_weights.extend([0] * x_pad) return (input_ids, segment_ids, input_mask, mask_qkv, masked_ids, masked_pos, masked_weights, -1, self.task_idx, oracle_pos, oracle_weights, oracle_labels) return (input_ids, segment_ids, input_mask, mask_qkv, masked_ids, masked_pos, masked_weights, -1, self.task_idx) class Preprocess4Seq2seqDecoder(Pipeline): """ Pre-processing steps for pretraining transformer """ def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128, new_segment_ids=False, mode="s2s", num_qkv=0, s2s_special_token=False, s2s_add_segment=False, s2s_share_segment=False, pos_shift=False): super().__init__() self.max_len = max_len self.vocab_words = vocab_words # vocabulary (sub)words self.indexer = indexer # function from token to token index self.max_len = 
max_len self._tril_matrix = torch.tril(torch.ones( (max_len, max_len), dtype=torch.long)) self.new_segment_ids = new_segment_ids self.task_idx = 3 # relax projection layer for different tasks assert mode in ("s2s", "l2r") self.mode = mode self.max_tgt_length = max_tgt_length self.num_qkv = num_qkv self.s2s_special_token = s2s_special_token self.s2s_add_segment = s2s_add_segment self.s2s_share_segment = s2s_share_segment self.pos_shift = pos_shift def __call__(self, instance): tokens_a, max_a_len = instance # Add Special Tokens if self.s2s_special_token: padded_tokens_a = ['[S2S_CLS]'] + tokens_a + ['[S2S_SEP]'] else: padded_tokens_a = ['[CLS]'] + tokens_a + ['[SEP]'] assert len(padded_tokens_a) <= max_a_len + 2 if max_a_len + 2 > len(padded_tokens_a): padded_tokens_a += ['[PAD]'] * \ (max_a_len + 2 - len(padded_tokens_a)) assert len(padded_tokens_a) == max_a_len + 2 max_len_in_batch = min(self.max_tgt_length + max_a_len + 2, self.max_len) tokens = padded_tokens_a if self.new_segment_ids: if self.mode == "s2s": _enc_seg1 = 0 if self.s2s_share_segment else 4 if self.s2s_add_segment: if self.s2s_share_segment: segment_ids = [ 0] + [1]*(len(padded_tokens_a)-1) + [5]*(max_len_in_batch - len(padded_tokens_a)) else: segment_ids = [ 4] + [6]*(len(padded_tokens_a)-1) + [5]*(max_len_in_batch - len(padded_tokens_a)) else: segment_ids = [4]*(len(padded_tokens_a)) + \ [5]*(max_len_in_batch - len(padded_tokens_a)) else: segment_ids = [2]*max_len_in_batch else: segment_ids = [0]*(len(padded_tokens_a)) \ + [1]*(max_len_in_batch - len(padded_tokens_a)) if self.num_qkv > 1: mask_qkv = [0]*(len(padded_tokens_a)) + [1] * \ (max_len_in_batch - len(padded_tokens_a)) else: mask_qkv = None position_ids = [] for i in range(len(tokens_a) + 2): position_ids.append(i) for i in range(len(tokens_a) + 2, max_a_len + 2): position_ids.append(0) for i in range(max_a_len + 2, max_len_in_batch): position_ids.append(i - (max_a_len + 2) + len(tokens_a) + 2) # Token Indexing input_ids = self.indexer(tokens) # Zero Padding input_mask = torch.zeros( max_len_in_batch, max_len_in_batch, dtype=torch.long) if self.mode == "s2s": input_mask[:, :len(tokens_a)+2].fill_(1) else: st, end = 0, len(tokens_a) + 2 input_mask[st:end, st:end].copy_( self._tril_matrix[:end, :end]) input_mask[end:, :len(tokens_a)+2].fill_(1) second_st, second_end = len(padded_tokens_a), max_len_in_batch input_mask[second_st:second_end, second_st:second_end].copy_( self._tril_matrix[:second_end-second_st, :second_end-second_st]) return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
data2vec_vision-main
unilm-v1/src/biunilm/seq2seq_loader.py
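# Illustrative sketch: the "s2s" self-attention mask built by
# Preprocess4Seq2seq / Preprocess4Seq2seqDecoder lets every position attend to
# the whole source segment (bidirectional) while target positions attend to the
# source plus earlier target positions only (lower-triangular). The helper
# below builds the same mask shape for a toy size; it is a simplification that
# ignores padding and the special tokens handled by the real pipeline.
import torch

def s2s_attention_mask(src_len, tgt_len, max_len=None):
    total = src_len + tgt_len
    max_len = max_len or total
    mask = torch.zeros(max_len, max_len, dtype=torch.long)
    # every position may look at the source tokens
    mask[:, :src_len].fill_(1)
    # target positions additionally see previous target positions (causal part)
    tril = torch.tril(torch.ones(tgt_len, tgt_len, dtype=torch.long))
    mask[src_len:src_len + tgt_len, src_len:src_len + tgt_len].copy_(tril)
    return mask

print(s2s_attention_mask(3, 2))  # 3 source tokens + 2 target tokens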
import pickle import math import argparse import glob from pathlib import Path from tqdm import tqdm import unicodedata from pytorch_pretrained_bert.tokenization import BertTokenizer def read_traces_from_file(file_name): with open(file_name, "rb") as fin: meta = pickle.load(fin) num_samples = meta["num_samples"] samples = [] for _ in range(num_samples): samples.append(pickle.load(fin)) return samples def get_best_sequence(sample, eos_id, pad_id, length_penalty=None, alpha=None, expect=None, min_len=None): # if not any((length_penalty, alpha, expect, min_len)): # raise ValueError( # "You can only specify length penalty or alpha, but not both.") scores = sample["scores"] wids_list = sample["wids"] ptrs = sample["ptrs"] last_frame_id = len(scores) - 1 for i, wids in enumerate(wids_list): if all(wid in (eos_id, pad_id) for wid in wids): last_frame_id = i break while all(wid == pad_id for wid in wids_list[last_frame_id]): last_frame_id -= 1 max_score = -math.inf frame_id = -1 pos_in_frame = -1 for fid in range(last_frame_id + 1): for i, wid in enumerate(wids_list[fid]): if fid <= last_frame_id and scores[fid][i] >= 0: # skip paddings continue if (wid in (eos_id, pad_id)) or fid == last_frame_id: s = scores[fid][i] if length_penalty: if expect: s -= length_penalty * math.fabs(fid+1 - expect) else: s += length_penalty * (fid + 1) elif alpha: s = s / math.pow((5 + fid + 1) / 6.0, alpha) if s > max_score: # if (frame_id != -1) and min_len and (fid+1 < min_len): # continue max_score = s frame_id = fid pos_in_frame = i if frame_id == -1: seq = [] else: seq = [wids_list[frame_id][pos_in_frame]] for fid in range(frame_id, 0, -1): pos_in_frame = ptrs[fid][pos_in_frame] seq.append(wids_list[fid - 1][pos_in_frame]) seq.reverse() return seq def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def simple_postprocess(tk_list): # truncate duplicate punctuations while tk_list and len(tk_list) > 4 and len(tk_list[-1]) == 1 and unicodedata.category(tk_list[-1]).startswith('P') and all(it == tk_list[-1] for it in tk_list[-4:]): tk_list = tk_list[:-3] return tk_list def main(args): tokenizer = BertTokenizer.from_pretrained( args.bert_model, do_lower_case=args.do_lower_case) eos_id, pad_id = set(tokenizer.convert_tokens_to_ids(["[SEP]", "[PAD]"])) for input_file in tqdm(glob.glob(args.input)): if not Path(input_file+'.trace.pickle').exists(): continue print(input_file) samples = read_traces_from_file(input_file+'.trace.pickle') results = [] for s in samples: word_ids = get_best_sequence(s, eos_id, pad_id, alpha=args.alpha, length_penalty=args.length_penalty, expect=args.expect, min_len=args.min_len) tokens = tokenizer.convert_ids_to_tokens(word_ids) buf = [] for t in tokens: if t in ("[SEP]", "[PAD]"): break else: buf.append(t) results.append(" ".join(simple_postprocess(detokenize(buf)))) fn_out = input_file+'.' 
if args.length_penalty: fn_out += 'lenp'+str(args.length_penalty) if args.expect: fn_out += 'exp'+str(args.expect) if args.alpha: fn_out += 'alp'+str(args.alpha) if args.min_len: fn_out += 'minl'+str(args.min_len) with open(fn_out, "w", encoding="utf-8") as fout: for line in results: fout.write(line) fout.write("\n") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--input", type=str, help="Input file.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--alpha", default=None, type=float) parser.add_argument("--length_penalty", default=None, type=float) parser.add_argument("--expect", default=None, type=float, help="Expectation of target length.") parser.add_argument("--min_len", default=None, type=int) parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") args = parser.parse_args() main(args)
data2vec_vision-main
unilm-v1/src/biunilm/gen_seq_from_trace.py
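# Illustrative sketch: get_best_sequence() in gen_seq_from_trace.py rebuilds a
# hypothesis from beam-search traces by following back-pointers: wids[t][k] is
# the word emitted by beam k at step t, and ptrs[t][k] is the index of that
# beam's parent at step t-1. The toy trace below is invented for this sketch.
def backtrack(wids, ptrs, frame_id, pos_in_frame):
    seq = [wids[frame_id][pos_in_frame]]
    for fid in range(frame_id, 0, -1):
        pos_in_frame = ptrs[fid][pos_in_frame]
        seq.append(wids[fid - 1][pos_in_frame])
    seq.reverse()
    return seq

wids = [[11, 12], [21, 22], [31, 32]]  # word ids: wids[step][beam]
ptrs = [[0, 0], [0, 1], [1, 0]]        # parent beam: ptrs[step][beam]
print(backtrack(wids, ptrs, frame_id=2, pos_in_frame=0))  # -> [12, 22, 31]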
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import math import json import argparse import random from pathlib import Path from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import RandomSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer from pytorch_pretrained_bert.modeling import BertForPreTrainingLossMask from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear from nn.data_parallel import DataParallelImbalance import biunilm.seq2seq_loader as seq2seq_loader import torch.distributed as dist logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) def _get_max_epoch_model(output_dir): fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin")) fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin")) if (not fn_model_list) or (not fn_optim_list): return None both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list] ) & set([int(Path(fn).stem.split('.')[-1]) for fn in fn_optim_list]) if both_set: return max(both_set) else: return None def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--src_file", default=None, type=str, help="The input data file name.") parser.add_argument("--tgt_file", default=None, type=str, help="The output data file name.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--config_path", default=None, type=str, help="Bert config file path.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--log_dir", default='', type=str, required=True, help="The output directory where the log will be written.") parser.add_argument("--model_recover_path", default=None, type=str, required=True, help="The file of fine-tuned pretraining model.") parser.add_argument("--optim_recover_path", default=None, type=str, help="The file of pretraining optimizer.") # Other parameters parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=64, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--label_smoothing", default=0, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.01, type=float, help="The weight decay rate for Adam.") parser.add_argument("--finetune_decay", action='store_true', help="Weight decay to the original weights.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--hidden_dropout_prob", default=0.1, type=float, help="Dropout rate for hidden states.") parser.add_argument("--attention_probs_dropout_prob", default=0.1, type=float, help="Dropout rate for attention probabilities.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--fp32_embedding', action='store_true', help="Whether to use 32-bit float precision instead of 16-bit for embeddings") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--amp', action='store_true', help="Whether to use amp for fp16") parser.add_argument('--from_scratch', action='store_true', help="Initialize parameters with random values (i.e., training from scratch).") parser.add_argument('--new_segment_ids', action='store_true', help="Use new segment ids for bi-uni-directional LM.") parser.add_argument('--new_pos_ids', action='store_true', help="Use new position ids for LMs.") parser.add_argument('--tokenized_input', action='store_true', help="Whether the input is tokenized.") parser.add_argument('--max_len_a', type=int, default=0, help="Truncate_config: maximum length of segment A.") parser.add_argument('--max_len_b', type=int, default=0, help="Truncate_config: maximum length of segment B.") parser.add_argument('--trunc_seg', default='', help="Truncate_config: first truncate segment A/B (option: a, b).") parser.add_argument('--always_truncate_tail', action='store_true', help="Truncate_config: Whether we should always truncate tail.") parser.add_argument("--mask_prob", default=0.15, type=float, help="Number of prediction is sometimes less than max_pred when sequence is short.") parser.add_argument("--mask_prob_eos", default=0, type=float, help="Number of prediction is sometimes less than max_pred when sequence is short.") parser.add_argument('--max_pred', type=int, default=20, help="Max tokens of prediction.") parser.add_argument("--num_workers", default=0, type=int, help="Number of workers for the data loader.") parser.add_argument('--mask_source_words', action='store_true', help="Whether to mask source words for training") parser.add_argument('--skipgram_prb', type=float, default=0.0, help='prob of ngram mask') parser.add_argument('--skipgram_size', type=int, default=1, help='the max size of ngram mask') parser.add_argument('--mask_whole_word', action='store_true', help="Whether masking a whole word.") parser.add_argument('--do_l2r_training', action='store_true', help="Whether to do left to right training") parser.add_argument('--has_sentence_oracle', action='store_true', help="Whether to have sentence level oracle for training. 
" "Only useful for summary generation") parser.add_argument('--max_position_embeddings', type=int, default=None, help="max position embeddings") parser.add_argument('--relax_projection', action='store_true', help="Use different projection layers for tasks.") parser.add_argument('--ffn_type', default=0, type=int, help="0: default mlp; 1: W((Wx+b) elem_prod x);") parser.add_argument('--num_qkv', default=0, type=int, help="Number of different <Q,K,V>.") parser.add_argument('--seg_emb', action='store_true', help="Using segment embedding for self-attention.") parser.add_argument('--s2s_special_token', action='store_true', help="New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.") parser.add_argument('--s2s_add_segment', action='store_true', help="Additional segmental for the encoder of S2S.") parser.add_argument('--s2s_share_segment', action='store_true', help="Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).") parser.add_argument('--pos_shift', action='store_true', help="Using position shift for fine-tuning.") args = parser.parse_args() assert Path(args.model_recover_path).exists( ), "--model_recover_path doesn't exist" args.output_dir = args.output_dir.replace( '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', '')) args.log_dir = args.log_dir.replace( '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', '')) os.makedirs(args.output_dir, exist_ok=True) os.makedirs(args.log_dir, exist_ok=True) json.dump(args.__dict__, open(os.path.join( args.output_dir, 'opt.json'), 'w'), sort_keys=True, indent=2) if args.local_rank == -1 or args.no_cuda: device = torch.device( "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs dist.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = int( args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") if args.local_rank not in (-1, 0): # Make sure only the first process in distributed training will download model & vocab dist.barrier() tokenizer = BertTokenizer.from_pretrained( args.bert_model, do_lower_case=args.do_lower_case) if args.max_position_embeddings: tokenizer.max_len = args.max_position_embeddings data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer if args.local_rank == 0: dist.barrier() if args.do_train: print("Loading Train Dataset", args.data_dir) bi_uni_pipeline = [seq2seq_loader.Preprocess4Seq2seq(args.max_pred, args.mask_prob, list(tokenizer.vocab.keys( )), tokenizer.convert_tokens_to_ids, args.max_seq_length, new_segment_ids=args.new_segment_ids, truncate_config={'max_len_a': args.max_len_a, 'max_len_b': args.max_len_b, 'trunc_seg': args.trunc_seg, 'always_truncate_tail': args.always_truncate_tail}, mask_source_words=args.mask_source_words, skipgram_prb=args.skipgram_prb, skipgram_size=args.skipgram_size, 
mask_whole_word=args.mask_whole_word, mode="s2s", has_oracle=args.has_sentence_oracle, num_qkv=args.num_qkv, s2s_special_token=args.s2s_special_token, s2s_add_segment=args.s2s_add_segment, s2s_share_segment=args.s2s_share_segment, pos_shift=args.pos_shift)] file_oracle = None if args.has_sentence_oracle: file_oracle = os.path.join(args.data_dir, 'train.oracle') fn_src = os.path.join( args.data_dir, args.src_file if args.src_file else 'train.src') fn_tgt = os.path.join( args.data_dir, args.tgt_file if args.tgt_file else 'train.tgt') train_dataset = seq2seq_loader.Seq2SeqDataset( fn_src, fn_tgt, args.train_batch_size, data_tokenizer, args.max_seq_length, file_oracle=file_oracle, bi_uni_pipeline=bi_uni_pipeline) if args.local_rank == -1: train_sampler = RandomSampler(train_dataset, replacement=False) _batch_size = args.train_batch_size else: train_sampler = DistributedSampler(train_dataset) _batch_size = args.train_batch_size // dist.get_world_size() train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=_batch_size, sampler=train_sampler, num_workers=args.num_workers, collate_fn=seq2seq_loader.batch_list_to_batch_tensors, pin_memory=False) # note: args.train_batch_size has been changed to (/= args.gradient_accumulation_steps) # t_total = int(math.ceil(len(train_dataset.ex_list) / args.train_batch_size) t_total = int(len(train_dataloader) * args.num_train_epochs / args.gradient_accumulation_steps) amp_handle = None if args.fp16 and args.amp: from apex import amp amp_handle = amp.init(enable_caching=True) logger.info("enable fp16 with amp") # Prepare model recover_step = _get_max_epoch_model(args.output_dir) cls_num_labels = 2 type_vocab_size = 6 + \ (1 if args.s2s_add_segment else 0) if args.new_segment_ids else 2 num_sentlvl_labels = 2 if args.has_sentence_oracle else 0 relax_projection = 4 if args.relax_projection else 0 if args.local_rank not in (-1, 0): # Make sure only the first process in distributed training will download model & vocab dist.barrier() if (recover_step is None) and (args.model_recover_path is None): # if _state_dict == {}, the parameters are randomly initialized # if _state_dict == None, the parameters are initialized with bert-init _state_dict = {} if args.from_scratch else None model = BertForPreTrainingLossMask.from_pretrained( args.bert_model, state_dict=_state_dict, num_labels=cls_num_labels, num_rel=0, type_vocab_size=type_vocab_size, config_path=args.config_path, task_idx=3, num_sentlvl_labels=num_sentlvl_labels, max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing, fp32_embedding=args.fp32_embedding, relax_projection=relax_projection, new_pos_ids=args.new_pos_ids, ffn_type=args.ffn_type, hidden_dropout_prob=args.hidden_dropout_prob, attention_probs_dropout_prob=args.attention_probs_dropout_prob, num_qkv=args.num_qkv, seg_emb=args.seg_emb) global_step = 0 else: if recover_step: logger.info("***** Recover model: %d *****", recover_step) model_recover = torch.load(os.path.join( args.output_dir, "model.{0}.bin".format(recover_step)), map_location='cpu') # recover_step == number of epochs global_step = math.floor( recover_step * t_total / args.num_train_epochs) elif args.model_recover_path: logger.info("***** Recover model: %s *****", args.model_recover_path) model_recover = torch.load( args.model_recover_path, map_location='cpu') global_step = 0 model = BertForPreTrainingLossMask.from_pretrained( args.bert_model, state_dict=model_recover, num_labels=cls_num_labels, num_rel=0, type_vocab_size=type_vocab_size, 
config_path=args.config_path, task_idx=3, num_sentlvl_labels=num_sentlvl_labels, max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing, fp32_embedding=args.fp32_embedding, relax_projection=relax_projection, new_pos_ids=args.new_pos_ids, ffn_type=args.ffn_type, hidden_dropout_prob=args.hidden_dropout_prob, attention_probs_dropout_prob=args.attention_probs_dropout_prob, num_qkv=args.num_qkv, seg_emb=args.seg_emb) if args.local_rank == 0: dist.barrier() if args.fp16: model.half() if args.fp32_embedding: model.bert.embeddings.word_embeddings.float() model.bert.embeddings.position_embeddings.float() model.bert.embeddings.token_type_embeddings.float() model.to(device) if args.local_rank != -1: try: from torch.nn.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("DistributedDataParallel") model = DDP(model, device_ids=[ args.local_rank], output_device=args.local_rank, find_unused_parameters=True) elif n_gpu > 1: # model = torch.nn.DataParallel(model) model = DataParallelImbalance(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any( nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any( nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: # from apex.optimizers import FP16_Optimizer from pytorch_pretrained_bert.optimization_fp16 import FP16_Optimizer_State from apex.optimizers import FusedAdam except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer_State( optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer_State( optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) if recover_step: logger.info("***** Recover optimizer: %d *****", recover_step) optim_recover = torch.load(os.path.join( args.output_dir, "optim.{0}.bin".format(recover_step)), map_location='cpu') if hasattr(optim_recover, 'state_dict'): optim_recover = optim_recover.state_dict() optimizer.load_state_dict(optim_recover) if args.loss_scale == 0: logger.info("***** Recover optimizer: dynamic_loss_scale *****") optimizer.dynamic_loss_scale = True logger.info("***** CUDA.empty_cache() *****") torch.cuda.empty_cache() if args.do_train: logger.info("***** Running training *****") logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", t_total) model.train() if recover_step: start_epoch = recover_step+1 else: start_epoch = 1 for i_epoch in trange(start_epoch, int(args.num_train_epochs)+1, desc="Epoch", disable=args.local_rank not in (-1, 0)): if args.local_rank != -1: train_sampler.set_epoch(i_epoch) iter_bar = tqdm(train_dataloader, desc='Iter (loss=X.XXX)', disable=args.local_rank not in (-1, 0)) for step, batch in enumerate(iter_bar): batch = [ t.to(device) if t is not None else None for t in batch] if args.has_sentence_oracle: input_ids, segment_ids, input_mask, mask_qkv, lm_label_ids, masked_pos, masked_weights, is_next, task_idx, oracle_pos, oracle_weights, oracle_labels = batch else: input_ids, segment_ids, input_mask, 
mask_qkv, lm_label_ids, masked_pos, masked_weights, is_next, task_idx = batch oracle_pos, oracle_weights, oracle_labels = None, None, None loss_tuple = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next, masked_pos=masked_pos, masked_weights=masked_weights, task_idx=task_idx, masked_pos_2=oracle_pos, masked_weights_2=oracle_weights, masked_labels_2=oracle_labels, mask_qkv=mask_qkv) masked_lm_loss, next_sentence_loss = loss_tuple if n_gpu > 1: # mean() to average on multi-gpu. # loss = loss.mean() masked_lm_loss = masked_lm_loss.mean() next_sentence_loss = next_sentence_loss.mean() loss = masked_lm_loss + next_sentence_loss # logging for each step (i.e., before normalization by args.gradient_accumulation_steps) iter_bar.set_description('Iter (loss=%5.3f)' % loss.item()) # ensure that accumlated gradients are normalized if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) if amp_handle: amp_handle._clear_cache() else: loss.backward() if (step + 1) % args.gradient_accumulation_steps == 0: lr_this_step = args.learning_rate * \ warmup_linear(global_step/t_total, args.warmup_proportion) if args.fp16: # modify learning rate with special warm up BERT uses for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 # Save a trained model if (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info( "** ** * Saving fine-tuned model and optimizer ** ** * ") model_to_save = model.module if hasattr( model, 'module') else model # Only save the model it-self output_model_file = os.path.join( args.output_dir, "model.{0}.bin".format(i_epoch)) torch.save(model_to_save.state_dict(), output_model_file) output_optim_file = os.path.join( args.output_dir, "optim.{0}.bin".format(i_epoch)) torch.save(optimizer.state_dict(), output_optim_file) logger.info("***** CUDA.empty_cache() *****") torch.cuda.empty_cache() if __name__ == "__main__": main()
data2vec_vision-main
unilm-v1/src/biunilm/run_seq2seq.py
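# Illustrative sketch: run_seq2seq.py rescales the learning rate by hand with
# warmup_linear(global_step / t_total, warmup_proportion) from
# pytorch_pretrained_bert. The helper below is a standalone re-implementation
# of that schedule's shape (linear ramp during the warmup fraction, then linear
# decay towards zero) for illustration only; it is not the library function.
def warmup_linear_schedule(progress, warmup=0.1):
    # progress = global_step / t_total, assumed to lie in [0, 1]
    if progress < warmup:
        return progress / warmup
    return max(0.0, 1.0 - progress)

base_lr = 5e-5
t_total = 10
for step in range(t_total + 1):
    lr = base_lr * warmup_linear_schedule(step / t_total, warmup=0.1)
    print("step {:2d}  lr {:.2e}".format(step, lr))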
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import logging import argparse import math from tqdm import tqdm, trange import numpy as np import torch import random import pickle from transformers import BertTokenizer, RobertaTokenizer from s2s_ft.modeling_decoding import BertForSeq2SeqDecoder, BertConfig from transformers.tokenization_bert import whitespace_tokenize import s2s_ft.s2s_loader as seq2seq_loader from s2s_ft.utils import load_and_cache_examples from transformers import \ BertTokenizer, RobertaTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer TOKENIZER_CLASSES = { 'bert': BertTokenizer, 'minilm': MinilmTokenizer, 'roberta': RobertaTokenizer, 'unilm': UnilmTokenizer, } class WhitespaceTokenizer(object): def tokenize(self, text): return whitespace_tokenize(text) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def ascii_print(text): text = text.encode("ascii", "ignore") print(text) def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(TOKENIZER_CLASSES.keys())) parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model checkpoint.") parser.add_argument("--config_path", default=None, type=str, help="Path to config.json for the model.") # tokenizer_name parser.add_argument("--tokenizer_name", default=None, type=str, required=True, help="tokenizer name") parser.add_argument("--max_seq_length", default=512, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") # decoding parameters parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--amp', action='store_true', help="Whether to use amp for fp16") parser.add_argument("--input_file", type=str, help="Input file") parser.add_argument('--subset', type=int, default=0, help="Decode a subset of the input dataset.") parser.add_argument("--output_file", type=str, help="output file") parser.add_argument("--split", type=str, default="", help="Data split (train/val/test).") parser.add_argument('--tokenized_input', action='store_true', help="Whether the input is tokenized.") parser.add_argument('--seed', type=int, default=123, help="random seed for initialization") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument('--batch_size', type=int, default=4, help="Batch size for decoding.") parser.add_argument('--beam_size', type=int, default=1, help="Beam size for searching") parser.add_argument('--length_penalty', type=float, default=0, help="Length penalty for beam search") parser.add_argument('--forbid_duplicate_ngrams', action='store_true') parser.add_argument('--forbid_ignore_word', type=str, default=None, help="Forbid the word during forbid_duplicate_ngrams") parser.add_argument("--min_len", default=1, type=int) parser.add_argument('--need_score_traces', action='store_true') parser.add_argument('--ngram_size', type=int, default=3) parser.add_argument('--mode', default="s2s", choices=["s2s", "l2r", "both"]) parser.add_argument('--max_tgt_length', type=int, default=128, help="maximum length of target sequence") parser.add_argument('--s2s_special_token', action='store_true', help="New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.") parser.add_argument('--s2s_add_segment', action='store_true', help="Additional segmental for the encoder of S2S.") parser.add_argument('--s2s_share_segment', action='store_true', help="Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).") parser.add_argument('--pos_shift', action='store_true', help="Using position shift for fine-tuning.") parser.add_argument("--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3") args = parser.parse_args() if args.need_score_traces and args.beam_size <= 1: raise ValueError( "Score trace is only available for beam search with beam size > 1.") if args.max_tgt_length >= args.max_seq_length - 2: raise ValueError("Maximum tgt length exceeds max seq length - 2.") device = torch.device( "cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() if args.seed > 0: random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) else: random_seed = random.randint(0, 10000) logger.info("Set random seed as: {}".format(random_seed)) random.seed(random_seed) np.random.seed(random_seed) torch.manual_seed(random_seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) tokenizer = TOKENIZER_CLASSES[args.model_type].from_pretrained( args.tokenizer_name, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) if args.model_type == "roberta": vocab = tokenizer.encoder else: vocab = tokenizer.vocab tokenizer.max_len = args.max_seq_length config_file = args.config_path if args.config_path else 
os.path.join(args.model_path, "config.json") logger.info("Read decoding config from: %s" % config_file) config = BertConfig.from_json_file(config_file) bi_uni_pipeline = [] bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqDecoder( list(vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length, max_tgt_length=args.max_tgt_length, pos_shift=args.pos_shift, source_type_id=config.source_type_id, target_type_id=config.target_type_id, cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token, pad_token=tokenizer.pad_token)) mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids( [tokenizer.mask_token, tokenizer.sep_token, tokenizer.sep_token]) forbid_ignore_set = None if args.forbid_ignore_word: w_list = [] for w in args.forbid_ignore_word.split('|'): if w.startswith('[') and w.endswith(']'): w_list.append(w.upper()) else: w_list.append(w) forbid_ignore_set = set(tokenizer.convert_tokens_to_ids(w_list)) print(args.model_path) found_checkpoint_flag = False for model_recover_path in [args.model_path.strip()]: logger.info("***** Recover model: %s *****", model_recover_path) found_checkpoint_flag = True model = BertForSeq2SeqDecoder.from_pretrained( model_recover_path, config=config, mask_word_id=mask_word_id, search_beam_size=args.beam_size, length_penalty=args.length_penalty, eos_id=eos_word_ids, sos_id=sos_word_id, forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set, ngram_size=args.ngram_size, min_len=args.min_len, mode=args.mode, max_position_embeddings=args.max_seq_length, pos_shift=args.pos_shift, ) if args.fp16: model.half() model.to(device) if n_gpu > 1: model = torch.nn.DataParallel(model) torch.cuda.empty_cache() model.eval() next_i = 0 max_src_length = args.max_seq_length - 2 - args.max_tgt_length to_pred = load_and_cache_examples( args.input_file, tokenizer, local_rank=-1, cached_features_file=None, shuffle=False) input_lines = [] for line in to_pred: input_lines.append(tokenizer.convert_ids_to_tokens(line["source_ids"])[:max_src_length]) if args.subset > 0: logger.info("Decoding subset: %d", args.subset) input_lines = input_lines[:args.subset] input_lines = sorted(list(enumerate(input_lines)), key=lambda x: -len(x[1])) output_lines = [""] * len(input_lines) score_trace_list = [None] * len(input_lines) total_batch = math.ceil(len(input_lines) / args.batch_size) with tqdm(total=total_batch) as pbar: batch_count = 0 first_batch = True while next_i < len(input_lines): _chunk = input_lines[next_i:next_i + args.batch_size] buf_id = [x[0] for x in _chunk] buf = [x[1] for x in _chunk] next_i += args.batch_size batch_count += 1 max_a_len = max([len(x) for x in buf]) instances = [] for instance in [(x, max_a_len) for x in buf]: for proc in bi_uni_pipeline: instances.append(proc(instance)) with torch.no_grad(): batch = seq2seq_loader.batch_list_to_batch_tensors( instances) batch = [ t.to(device) if t is not None else None for t in batch] input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx = batch traces = model(input_ids, token_type_ids, position_ids, input_mask, task_idx=task_idx, mask_qkv=mask_qkv) if args.beam_size > 1: traces = {k: v.tolist() for k, v in traces.items()} output_ids = traces['pred_seq'] else: output_ids = traces.tolist() for i in range(len(buf)): w_ids = output_ids[i] output_buf = tokenizer.convert_ids_to_tokens(w_ids) output_tokens = [] for t in output_buf: if t in (tokenizer.sep_token, tokenizer.pad_token): break output_tokens.append(t) if args.model_type == "roberta": 
output_sequence = tokenizer.convert_tokens_to_string(output_tokens) else: output_sequence = ' '.join(detokenize(output_tokens)) if '\n' in output_sequence: output_sequence = " [X_SEP] ".join(output_sequence.split('\n')) output_lines[buf_id[i]] = output_sequence if first_batch or batch_count % 50 == 0: logger.info("{} = {}".format(buf_id[i], output_sequence)) if args.need_score_traces: score_trace_list[buf_id[i]] = { 'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]} pbar.update(1) first_batch = False if args.output_file: fn_out = args.output_file else: fn_out = model_recover_path+'.'+args.split with open(fn_out, "w", encoding="utf-8") as fout: for l in output_lines: fout.write(l) fout.write("\n") if args.need_score_traces: with open(fn_out + ".trace.pickle", "wb") as fout_trace: pickle.dump( {"version": 0.0, "num_samples": len(input_lines)}, fout_trace) for x in score_trace_list: pickle.dump(x, fout_trace) if not found_checkpoint_flag: logger.info("Not found the model checkpoint file!") if __name__ == "__main__": main()
data2vec_vision-main
s2s-ft/decode_seq2seq.py
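# Illustrative sketch: the decoder above is run with --forbid_duplicate_ngrams
# and --ngram_size, which block candidate words that would repeat an n-gram
# already generated. The real logic lives inside BertForSeq2SeqDecoder; the
# function below is only a generic, framework-free sketch of the idea (collect
# the continuations that would recreate a seen n-gram, then exclude them), not
# the library's implementation.
def banned_next_tokens(prefix, ngram_size=3):
    """Tokens that would complete an n-gram already present in prefix."""
    banned = set()
    if len(prefix) < ngram_size:
        return banned
    key = tuple(prefix[-(ngram_size - 1):])  # last (n-1) generated tokens
    for i in range(len(prefix) - ngram_size + 1):
        if tuple(prefix[i:i + ngram_size - 1]) == key:
            banned.add(prefix[i + ngram_size - 1])
    return banned

# with ngram_size=3, after "a b c a b" the token "c" would recreate "a b c"
print(banned_next_tokens(["a", "b", "c", "a", "b"], ngram_size=3))  # {'c'}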
from io import open from setuptools import find_packages, setup extras = { 'serving': ['pydantic', 'uvicorn', 'fastapi'], 'serving-tf': ['pydantic', 'uvicorn', 'fastapi'], 'serving-torch': ['pydantic', 'uvicorn', 'fastapi', 'torch'] } # flatten the per-extra requirement lists so 'all' is a list of requirement # strings rather than a list of lists (extras_require expects strings) extras['all'] = [package for group in extras.values() for package in group] setup( name="s2s-ft", version="0.0.1", author="UniLM Team", author_email="[email protected]", description="Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning", long_description=open("README.md", "r", encoding='utf-8').read(), long_description_content_type="text/markdown", keywords='Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning', license='Apache', url="https://github.com/microsoft/unilm/tree/master/s2s-ft", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=['numpy', 'boto3', 'requests', 'tqdm', 'regex != 2019.12.17', 'sentencepiece', 'sacremoses', 'tensorboardX', 'transformers <= 2.10.0'], extras_require=extras, python_requires='>=3.5.0', classifiers=[ 'Programming Language :: Python :: 3', ], )
data2vec_vision-main
s2s-ft/setup.py
import pickle import math import argparse import glob import logging from pathlib import Path from tqdm import tqdm import unicodedata from transformers import BertTokenizer, RobertaTokenizer from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.tokenization_minilm import MinilmTokenizer logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) TOKENIZER_CLASSES = { 'bert': BertTokenizer, 'minilm': MinilmTokenizer, 'roberta': RobertaTokenizer, 'unilm': UnilmTokenizer, } def read_traces_from_file(file_name): with open(file_name, "rb") as fin: meta = pickle.load(fin) num_samples = meta["num_samples"] samples = [] for _ in range(num_samples): samples.append(pickle.load(fin)) return samples def get_best_sequence(sample, eos_id, pad_id, length_penalty=None, alpha=None, expect=None, min_len=None): # if not any((length_penalty, alpha, expect, min_len)): # raise ValueError( # "You can only specify length penalty or alpha, but not both.") scores = sample["scores"] wids_list = sample["wids"] ptrs = sample["ptrs"] last_frame_id = len(scores) - 1 for i, wids in enumerate(wids_list): if all(wid in (eos_id, pad_id) for wid in wids): last_frame_id = i break while all(wid == pad_id for wid in wids_list[last_frame_id]): last_frame_id -= 1 max_score = -math.inf frame_id = -1 pos_in_frame = -1 for fid in range(last_frame_id + 1): for i, wid in enumerate(wids_list[fid]): if fid <= last_frame_id and scores[fid][i] >= 0: # skip paddings continue if (wid in (eos_id, pad_id)) or fid == last_frame_id: s = scores[fid][i] if length_penalty: if expect: s -= length_penalty * math.fabs(fid+1 - expect) else: s += length_penalty * (fid + 1) elif alpha: s = s / math.pow((5 + fid + 1) / 6.0, alpha) if s > max_score: # if (frame_id != -1) and min_len and (fid+1 < min_len): # continue max_score = s frame_id = fid pos_in_frame = i if frame_id == -1: seq = [] else: seq = [wids_list[frame_id][pos_in_frame]] for fid in range(frame_id, 0, -1): pos_in_frame = ptrs[fid][pos_in_frame] seq.append(wids_list[fid - 1][pos_in_frame]) seq.reverse() return seq def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def simple_postprocess(tk_list): # truncate duplicate punctuations while tk_list and len(tk_list) > 4 and len(tk_list[-1]) == 1 and unicodedata.category(tk_list[-1]).startswith('P') and all(it == tk_list[-1] for it in tk_list[-4:]): tk_list = tk_list[:-3] return tk_list # def include_unk(line): # return " UNK ".join(line.split('<unk>')).strip() def main(args): tokenizer = TOKENIZER_CLASSES[args.model_type].from_pretrained( args.tokenizer_name, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) eos_token = tokenizer.sep_token pad_token = tokenizer.pad_token eos_id, pad_id = set(tokenizer.convert_tokens_to_ids([eos_token, pad_token])) logger.info("*********************************************") logger.info(" EOS TOKEN = {}, ID = {}".format(eos_token, eos_id)) logger.info(" PAD TOKEN = {}, ID = {}".format(pad_token, pad_id)) logger.info("*********************************************") for input_file in tqdm(glob.glob(args.input)): if not Path(input_file+'.trace.pickle').exists(): continue print(input_file) samples = read_traces_from_file(input_file+'.trace.pickle') results = [] for s in samples: word_ids = get_best_sequence(s, eos_id, pad_id, alpha=args.alpha, 
length_penalty=args.length_penalty, expect=args.expect, min_len=args.min_len) tokens = tokenizer.convert_ids_to_tokens(word_ids) buf = [] for t in tokens: if t in (eos_token, pad_token): break else: buf.append(t) if args.model_type == "roberta": output_text = " ".join(simple_postprocess(tokenizer.convert_tokens_to_string(buf).split(' '))) if '\n' in output_text: output_text = " [X_SEP] ".join(output_text.split('\n')) else: output_text = " ".join(simple_postprocess(detokenize(buf))) results.append(output_text) fn_out = input_file + '.' if args.length_penalty: fn_out += 'lenp'+str(args.length_penalty) if args.expect: fn_out += 'exp'+str(args.expect) if args.alpha: fn_out += 'alp'+str(args.alpha) if args.min_len: fn_out += 'minl'+str(args.min_len) with open(fn_out, "w", encoding="utf-8") as fout: for line in results: fout.write(line) fout.write("\n") logger.info("Output file = [%s]" % fn_out) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--input", type=str, help="Input file.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(TOKENIZER_CLASSES.keys())) parser.add_argument("--alpha", default=None, type=float) parser.add_argument("--length_penalty", default=None, type=float) parser.add_argument("--expect", default=None, type=float, help="Expectation of target length.") parser.add_argument("--min_len", default=None, type=int) # tokenizer_name parser.add_argument("--tokenizer_name", default=None, type=str, required=True, help="tokenizer name") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3") args = parser.parse_args() main(args)
data2vec_vision-main
s2s-ft/gen_seq_from_trace.py
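# --- Illustrative sketch (added for clarity; not one of the repository files) ---
# `get_best_sequence` in s2s-ft/gen_seq_from_trace.py re-ranks finished beam hypotheses
# with either an additive length penalty (optionally anchored to an expected length) or
# GNMT-style length normalization via `alpha`. A minimal standalone restatement of that
# rescoring rule, where `score` is a cumulative log-probability and `length` the
# hypothesis length; the function name is local to this sketch.
import math


def rescore_hypothesis(score, length, length_penalty=None, alpha=None, expect=None):
    """Mirror the hypothesis rescoring used when picking the best frame/position."""
    if length_penalty:
        if expect:
            # penalize deviation from the expected target length
            return score - length_penalty * math.fabs(length - expect)
        # reward longer hypotheses linearly
        return score + length_penalty * length
    if alpha:
        # GNMT length normalization: score / ((5 + length) / 6) ** alpha
        return score / math.pow((5 + length) / 6.0, alpha)
    return score


# With alpha-normalization, equal raw scores favor the longer hypothesis.
assert rescore_hypothesis(-6.0, 12, alpha=0.6) > rescore_hypothesis(-6.0, 6, alpha=0.6)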
from __future__ import absolute_import, division, print_function import argparse import logging import os import json import random import numpy as np import torch from torch.utils.data import (DataLoader, SequentialSampler) from torch.utils.data.distributed import DistributedSampler try: from torch.utils.tensorboard import SummaryWriter except: from tensorboardX import SummaryWriter import tqdm from s2s_ft.modeling import BertForSequenceToSequence from transformers import AdamW, get_linear_schedule_with_warmup from transformers import \ RobertaConfig, BertConfig, \ BertTokenizer, RobertaTokenizer, \ XLMRobertaConfig, XLMRobertaTokenizer from s2s_ft.configuration_unilm import UnilmConfig from s2s_ft.tokenization_unilm import UnilmTokenizer from s2s_ft.configuration_minilm import MinilmConfig from s2s_ft.tokenization_minilm import MinilmTokenizer from s2s_ft import utils from s2s_ft.config import BertForSeq2SeqConfig logger = logging.getLogger(__name__) MODEL_CLASSES = { 'bert': (BertConfig, BertTokenizer), 'minilm': (MinilmConfig, MinilmTokenizer), 'roberta': (RobertaConfig, RobertaTokenizer), 'xlm-roberta': (XLMRobertaConfig, XLMRobertaTokenizer), 'unilm': (UnilmConfig, UnilmTokenizer), } def prepare_for_training(args, model, checkpoint_state_dict, amp): no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) if amp: model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) if checkpoint_state_dict: amp.load_state_dict(checkpoint_state_dict['amp']) if checkpoint_state_dict: optimizer.load_state_dict(checkpoint_state_dict['optimizer']) model.load_state_dict(checkpoint_state_dict['model']) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) return model, optimizer def train(args, training_features, model, tokenizer): """ Train the model """ if args.local_rank in [-1, 0] and args.log_dir: tb_writer = SummaryWriter(log_dir=args.log_dir) else: tb_writer = None if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") else: amp = None # model recover recover_step = utils.get_max_epoch_model(args.output_dir) # if recover_step: # model_recover_checkpoint = os.path.join(args.output_dir, "model.{}.bin".format(recover_step)) # logger.info(" ** Recover model checkpoint in %s ** ", model_recover_checkpoint) # model_state_dict = torch.load(model_recover_checkpoint, map_location='cpu') # optimizer_recover_checkpoint = os.path.join(args.output_dir, "optim.{}.bin".format(recover_step)) # checkpoint_state_dict = torch.load(optimizer_recover_checkpoint, map_location='cpu') # checkpoint_state_dict['model'] = model_state_dict # else: checkpoint_state_dict = None model.to(args.device) model, optimizer = prepare_for_training(args, model, checkpoint_state_dict, amp=amp) if args.n_gpu == 0 or args.no_cuda: per_node_train_batch_size = 
args.per_gpu_train_batch_size * args.gradient_accumulation_steps else: per_node_train_batch_size = args.per_gpu_train_batch_size * args.n_gpu * args.gradient_accumulation_steps train_batch_size = per_node_train_batch_size * (torch.distributed.get_world_size() if args.local_rank != -1 else 1) global_step = recover_step if recover_step else 0 if args.num_training_steps == -1: args.num_training_steps = int(args.num_training_epochs * len(training_features) / train_batch_size) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.num_training_steps, last_epoch=-1) if checkpoint_state_dict: scheduler.load_state_dict(checkpoint_state_dict["lr_scheduler"]) train_dataset = utils.Seq2seqDatasetForBert( features=training_features, max_source_len=args.max_source_seq_length, max_target_len=args.max_target_seq_length, vocab_size=tokenizer.vocab_size, cls_id=tokenizer.cls_token_id, sep_id=tokenizer.sep_token_id, pad_id=tokenizer.pad_token_id, mask_id=tokenizer.mask_token_id, random_prob=args.random_prob, keep_prob=args.keep_prob, offset=train_batch_size * global_step, num_training_instances=train_batch_size * args.num_training_steps, ) logger.info("Check dataset:") for i in range(5): source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens = train_dataset.__getitem__(i) logger.info("Instance-%d" % i) logger.info("Source tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(source_ids))) logger.info("Target tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(target_ids))) logger.info("Mode = %s" % str(model)) # Train! logger.info(" ***** Running training ***** *") logger.info(" Num examples = %d", len(training_features)) logger.info(" Num Epochs = %.2f", len(train_dataset) / len(training_features)) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Batch size per node = %d", per_node_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", train_batch_size) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", args.num_training_steps) if args.num_training_steps <= global_step: logger.info("Training is done. 
Please use a new dir or clean this dir!") else: # The training features are shuffled train_sampler = SequentialSampler(train_dataset) \ if args.local_rank == -1 else DistributedSampler(train_dataset, shuffle=False) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=per_node_train_batch_size // args.gradient_accumulation_steps, collate_fn=utils.batch_list_to_batch_tensors) train_iterator = tqdm.tqdm( train_dataloader, initial=global_step, desc="Iter (loss=X.XXX, lr=X.XXXXXXX)", disable=args.local_rank not in [-1, 0]) model.train() model.zero_grad() tr_loss, logging_loss = 0.0, 0.0 for step, batch in enumerate(train_iterator): batch = tuple(t.to(args.device) for t in batch) inputs = {'source_ids': batch[0], 'target_ids': batch[1], 'pseudo_ids': batch[2], 'num_source_tokens': batch[3], 'num_target_tokens': batch[4]} loss = model(**inputs) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training train_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0])) if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() logging_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logger.info("") logger.info(" Step [%d ~ %d]: %.2f", global_step - args.logging_steps, global_step, logging_loss) logging_loss = 0.0 if args.local_rank in [-1, 0] and args.save_steps > 0 and \ (global_step % args.save_steps == 0 or global_step == args.num_training_steps): save_path = os.path.join(args.output_dir, "ckpt-%d" % global_step) os.makedirs(save_path, exist_ok=True) model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(save_path) # optim_to_save = { # "optimizer": optimizer.state_dict(), # "lr_scheduler": scheduler.state_dict(), # } # if args.fp16: # optim_to_save["amp"] = amp.state_dict() # torch.save( # optim_to_save, os.path.join(args.output_dir, 'optim.{}.bin'.format(global_step))) logger.info("Saving model checkpoint %d into %s", global_step, save_path) if args.local_rank in [-1, 0] and tb_writer: tb_writer.close() def get_args(): parser = argparse.ArgumentParser() # parser.add_argument("--train_source_file", default=None, type=str, required=True, # help="Training data contains source") # parser.add_argument("--train_target_file", default=None, type=str, required=True, # help="Training data contains target") parser.add_argument("--train_file", default=None, type=str, required=True, help="Training data (json format) for training. 
Keys: source and target") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list:") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") parser.add_argument("--log_dir", default=None, type=str, help="The output directory where the log will be written.") ## Other parameters parser.add_argument("--config_name", default=None, type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_source_seq_length", default=464, type=int, help="The maximum total source sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--max_target_seq_length", default=48, type=int, help="The maximum total target sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--cached_train_features_file", default=None, type=str, help="Cached training features file") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--label_smoothing", default=0.1, type=float, help="Max gradient norm.") parser.add_argument("--num_training_steps", default=-1, type=int, help="set total number of training steps to perform") parser.add_argument("--num_training_epochs", default=10, type=int, help="set total number of training epochs to perform (--num_training_steps has higher priority)") parser.add_argument("--num_warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--random_prob", default=0.1, type=float, help="prob to random replace a masked token") parser.add_argument("--keep_prob", default=0.1, type=float, help="prob to keep no change for a masked token") parser.add_argument('--logging_steps', type=int, default=500, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=1500, help="Save checkpoint every X updates steps.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") 
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() return args def prepare(args): # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() os.makedirs(args.output_dir, exist_ok=True) json.dump(args.__dict__, open(os.path.join( args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, 'einsum') except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") def get_model_and_tokenizer(args): config_class, tokenizer_class = MODEL_CLASSES[args.model_type] model_config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None) config = BertForSeq2SeqConfig.from_exist_config( config=model_config, label_smoothing=args.label_smoothing, max_position_embeddings=args.max_source_seq_length + args.max_target_seq_length) logger.info("Model config for seq2seq: %s", str(config)) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = BertForSequenceToSequence.from_pretrained( args.model_name_or_path, config=config, model_type=args.model_type, reuse_position_embedding=True, cache_dir=args.cache_dir if args.cache_dir else None) return model, tokenizer def main(): args = get_args() prepare(args) if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab # Load pretrained model and tokenizer model, tokenizer = get_model_and_tokenizer(args) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab if args.cached_train_features_file is None: args.cached_train_features_file = os.path.join(args.output_dir, "cached_features_for_training.pt") training_features = utils.load_and_cache_examples( example_file=args.train_file, tokenizer=tokenizer, local_rank=args.local_rank, cached_features_file=args.cached_train_features_file, shuffle=True, ) train(args, training_features, model, tokenizer) if __name__ == "__main__": main()
data2vec_vision-main
s2s-ft/run_seq2seq.py
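# --- Illustrative sketch (added for clarity; not one of the repository files) ---
# s2s-ft/run_seq2seq.py derives its effective batch size from the per-GPU batch size,
# the visible GPU count, gradient accumulation, and (under distributed training) the
# world size; that product also converts --num_training_epochs into training steps.
# A hedged, plain-Python restatement of the arithmetic; names are local to this sketch.
def effective_batch_size(per_gpu_batch_size, n_gpu, grad_accum_steps, world_size=1):
    """Samples consumed per optimizer step. Under DDP the script sets n_gpu=1 and world_size>1."""
    per_node = per_gpu_batch_size * max(n_gpu, 1) * grad_accum_steps
    return per_node * world_size


def steps_for_epochs(num_epochs, num_examples, train_batch_size):
    """How --num_training_steps is filled in when it is left at -1."""
    return int(num_epochs * num_examples / train_batch_size)


# Example: 8 samples/GPU on 4 GPUs with 2 accumulation steps -> 64 samples per update,
# so 10 epochs over 64k examples correspond to 10 * 64000 / 64 = 10000 optimizer steps.
assert effective_batch_size(8, 4, 2) == 64
assert steps_for_epochs(10, 64000, 64) == 10000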
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np # pip install py-rouge import rouge import time import tempfile import shutil # pip install pyrouge from evaluations.bs_pyrouge import Rouge155 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--gold", type=str, help="Gold output file.") parser.add_argument("--pred", type=str, help="Input prediction file.") parser.add_argument("--split", type=str, default="", help="Data split (train/dev/test).") parser.add_argument("--save_best", action='store_true', help="Save best epoch.") parser.add_argument("--only_eval_best", action='store_true', help="Only evaluate best epoch.") parser.add_argument("--trunc_len", type=int, default=0, help="Truncate line by the maximum length.") default_process_count = max(1, cpu_count() - 1) parser.add_argument("--processes", type=int, default=default_process_count, help="Number of processes to use (default %(default)s)") parser.add_argument("--perl", action='store_true', help="Using the perl script.") parser.add_argument('--lazy_eval', action='store_true', help="Skip evaluation if the .rouge file exists.") args = parser.parse_args() evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def rouge_results_to_str(results_dict): return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format( results_dict["rouge_1_f_score"] * 100, results_dict["rouge_2_f_score"] * 100, results_dict["rouge_l_f_score"] * 100, results_dict["rouge_1_recall"] * 100, results_dict["rouge_2_recall"] * 100, results_dict["rouge_l_recall"] * 100 ) def count_tokens(tokens): counter = {} for t in tokens: if t in counter.keys(): counter[t] += 1 else: counter[t] = 1 return counter def get_f1(text_a, text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = 
count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5) _tok_dict = {} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def fix_tokenization(text): input_tokens = text.split() output_tokens = [] i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip() gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] sentence = fix_tokenization(l.strip()).replace("(", " -LRB- ").replace(")", " -RRB- ") while " " in sentence: sentence = sentence.replace(" ", " ") buf.append(sentence) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores def main(): if args.perl: eval_fn_list = list(glob.glob(args.pred)) else: eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not( args.lazy_eval and Path(eval_fn+".rouge").exists())] eval_fn_list = list(filter(lambda fn: not(fn.endswith( '.post') 
or fn.endswith('.rouge')), eval_fn_list)) if args.only_eval_best: best_epoch_dict = {} for dir_path in set(Path(fn).parent for fn in eval_fn_list): fn_save = os.path.join(dir_path, 'save_best.dev') if Path(fn_save).exists(): with open(fn_save, 'r') as f_in: __, o_name, __ = f_in.read().strip().split('\n') epoch = o_name.split('.')[1] best_epoch_dict[dir_path] = epoch new_eval_fn_list = [] for fn in eval_fn_list: dir_path = Path(fn).parent if dir_path in best_epoch_dict: if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]: new_eval_fn_list.append(fn) eval_fn_list = new_eval_fn_list logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list)) num_pool = min(args.processes, len(eval_fn_list)) p = Pool(num_pool) r_list = p.imap_unordered(process_eval, eval_fn_list) r_list = sorted([(fn, scores) for fn, scores in r_list], key=lambda x: x[0]) rg2_dict = {} for fn, scores in r_list: print(fn) if args.perl: print(rouge_results_to_str(scores)) else: rg2_dict[fn] = scores['rouge-2']['f'] print( "ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f'])) with open(fn+".rouge", 'w') as f_out: f_out.write(json.dumps( {'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']})) p.close() p.join() if args.save_best: # find best results group_dict = {} for k, v in rg2_dict.items(): d_name, o_name = Path(k).parent, Path(k).name if (d_name not in group_dict) or (v > group_dict[d_name][1]): group_dict[d_name] = (o_name, v) # compare and save the best result for k, v in group_dict.items(): fn = os.path.join(k, 'save_best.'+args.split) o_name_s, rst_s = v should_save = True if Path(fn).exists(): with open(fn, 'r') as f_in: rst_f = float(f_in.read().strip().split('\n')[-1]) if rst_s <= rst_f: should_save = False if should_save: with open(fn, 'w') as f_out: f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s)) logger.info("Should save: {}".format(json.dumps(v, indent=2))) if __name__ == "__main__": main()
data2vec_vision-main
s2s-ft/evaluations/eval_for_xsum.py
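# --- Illustrative sketch (added for clarity; not one of the repository files) ---
# `get_f1` in the evaluation scripts scores the overlap of two strings as a clipped
# bag-of-words F1 (match counts are capped by each token's frequency on both sides).
# A standalone restatement using collections.Counter instead of the hand-rolled counter:
from collections import Counter


def unigram_f1(text_a, text_b):
    tokens_a, tokens_b = text_a.lower().split(), text_b.lower().split()
    if not tokens_a or not tokens_b:
        return 1 if len(tokens_a) == len(tokens_b) else 0
    match = sum((Counter(tokens_a) & Counter(tokens_b)).values())
    precision, recall = match / len(tokens_a), match / len(tokens_b)
    return 2.0 * precision * recall / (precision + recall + 1e-5)


# Two of three tokens overlap, so the score is close to 2/3.
assert abs(unigram_f1("the cat sat", "the cat slept") - 2 / 3) < 1e-3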
from __future__ import print_function, unicode_literals, division import os import re import codecs import platform from subprocess import check_output from tempfile import mkdtemp from functools import partial try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser from pyrouge.utils import log from pyrouge.utils.file_utils import verify_dir REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}", "-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'} def clean(x): return re.sub( r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''", lambda m: REMAP.get(m.group()), x) class DirectoryProcessor: @staticmethod def process(input_dir, output_dir, function): """ Apply function to all files in input_dir and save the resulting ouput files in output_dir. """ if not os.path.exists(output_dir): os.makedirs(output_dir) logger = log.get_global_console_logger() logger.info("Processing files in {}.".format(input_dir)) input_file_names = os.listdir(input_dir) for input_file_name in input_file_names: input_file = os.path.join(input_dir, input_file_name) with codecs.open(input_file, "r", encoding="UTF-8") as f: input_string = f.read() output_string = function(input_string) output_file = os.path.join(output_dir, input_file_name) with codecs.open(output_file, "w", encoding="UTF-8") as f: f.write(clean(output_string.lower())) logger.info("Saved processed files to {}.".format(output_dir)) class Rouge155(object): """ This is a wrapper for the ROUGE 1.5.5 summary evaluation package. This class is designed to simplify the evaluation process by: 1) Converting summaries into a format ROUGE understands. 2) Generating the ROUGE configuration file automatically based on filename patterns. This class can be used within Python like this: rouge = Rouge155() rouge.system_dir = 'test/systems' rouge.model_dir = 'test/models' # The system filename pattern should contain one group that # matches the document ID. rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html' # The model filename pattern has '#ID#' as a placeholder for the # document ID. If there are multiple model summaries, pyrouge # will use the provided regex to automatically match them with # the corresponding system summary. Here, [A-Z] matches # multiple model summaries for a given #ID#. rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate() print(rouge_output) output_dict = rouge.output_to_dict(rouge_ouput) print(output_dict) -> {'rouge_1_f_score': 0.95652, 'rouge_1_f_score_cb': 0.95652, 'rouge_1_f_score_ce': 0.95652, 'rouge_1_precision': 0.95652, [...] To evaluate multiple systems: rouge = Rouge155() rouge.system_dir = '/PATH/TO/systems' rouge.model_dir = 'PATH/TO/models' for system_id in ['id1', 'id2', 'id3']: rouge.system_filename_pattern = \ 'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id) rouge.model_filename_pattern = \ 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate(system_id) print(rouge_output) """ def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None): """ Create a Rouge155 object. rouge_dir: Directory containing Rouge-1.5.5.pl rouge_args: Arguments to pass through to ROUGE if you don't want to use the default pyrouge arguments. 
""" self.temp_dir = temp_dir self.log = log.get_global_console_logger() self.__set_dir_properties() self._config_file = None self._settings_file = self.__get_config_path() self.__set_rouge_dir(rouge_dir) self.args = self.__clean_rouge_args(rouge_args) self._system_filename_pattern = None self._model_filename_pattern = None def save_home_dir(self): config = ConfigParser() section = 'pyrouge settings' config.add_section(section) config.set(section, 'home_dir', self._home_dir) with open(self._settings_file, 'w') as f: config.write(f) self.log.info("Set ROUGE home directory to {}.".format(self._home_dir)) @property def settings_file(self): """ Path of the setttings file, which stores the ROUGE home dir. """ return self._settings_file @property def bin_path(self): """ The full path of the ROUGE binary (although it's technically a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl """ if self._bin_path is None: raise Exception( "ROUGE path not set. Please set the ROUGE home directory " "and ensure that ROUGE-1.5.5.pl exists in it.") return self._bin_path @property def system_filename_pattern(self): """ The regular expression pattern for matching system summary filenames. The regex string. E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. Currently, there is no support for multiple systems. """ return self._system_filename_pattern @system_filename_pattern.setter def system_filename_pattern(self, pattern): self._system_filename_pattern = pattern @property def model_filename_pattern(self): """ The regular expression pattern for matching model summary filenames. The pattern needs to contain the string "#ID#", which is a placeholder for the document ID. E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. "#ID#" is a placeholder for the document ID which has been matched by the "(\d+)" part of the system filename pattern. The different model summaries for a given document ID are matched by the "[A-Z]" part. """ return self._model_filename_pattern @model_filename_pattern.setter def model_filename_pattern(self, pattern): self._model_filename_pattern = pattern @property def config_file(self): return self._config_file @config_file.setter def config_file(self, path): config_dir, _ = os.path.split(path) verify_dir(config_dir, "configuration file") self._config_file = path def split_sentences(self): """ ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used. """ from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter() def sent_split_to_string(s): return "\n".join(ss.split(s)) process_func = partial( DirectoryProcessor.process, function=sent_split_to_string) self.__process_summaries(process_func) @staticmethod def convert_summaries_to_rouge_format(input_dir, output_dir): """ Convert all files in input_dir into a format ROUGE understands and saves the files to output_dir. The input files are assumed to be plain text with one sentence per line. input_dir: Path of directory containing the input files. output_dir: Path of directory in which the converted files will be saved. 
""" DirectoryProcessor.process( input_dir, output_dir, Rouge155.convert_text_to_rouge_format) @staticmethod def convert_text_to_rouge_format(text, title="dummy title"): """ Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line. text: The text to convert, containg one sentence per line. title: Optional title for the text. The title will appear in the converted file, but doesn't seem to have any other relevance. Returns: The converted text as string. """ sentences = text.split("\n") sent_elems = [ "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>" "{text}</a>".format(i=i, text=sent) for i, sent in enumerate(sentences, start=1)] html = """<html> <head> <title>{title}</title> </head> <body bgcolor="white"> {elems} </body> </html>""".format(title=title, elems="\n".join(sent_elems)) return html @staticmethod def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ system_filenames = [f for f in os.listdir(system_dir)] system_models_tuples = [] system_filename_pattern = re.compile(system_filename_pattern) for system_filename in sorted(system_filenames): match = system_filename_pattern.match(system_filename) if match: id = match.groups(0)[0] model_filenames = [model_filename_pattern.replace('#ID#', id)] # model_filenames = Rouge155.__get_model_filenames_for_id( # id, model_dir, model_filename_pattern) system_models_tuples.append( (system_filename, sorted(model_filenames))) if not system_models_tuples: raise Exception( "Did not find any files matching the pattern {} " "in the system summaries directory {}.".format( system_filename_pattern.pattern, system_dir)) with codecs.open(config_file_path, 'w', encoding='utf-8') as f: f.write('<ROUGE-EVAL version="1.55">') for task_id, (system_filename, model_filenames) in enumerate( system_models_tuples, start=1): eval_string = Rouge155.__get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames) f.write(eval_string) f.write("</ROUGE-EVAL>") def write_config(self, config_file_path=None, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. 
""" if not system_id: system_id = 1 if (not config_file_path) or (not self._config_dir): self._config_dir = mkdtemp(dir=self.temp_dir) config_filename = "rouge_conf.xml" else: config_dir, config_filename = os.path.split(config_file_path) verify_dir(config_dir, "configuration file") self._config_file = os.path.join(self._config_dir, config_filename) Rouge155.write_config_static( self._system_dir, self._system_filename_pattern, self._model_dir, self._model_filename_pattern, self._config_file, system_id) self.log.info( "Written ROUGE configuration to {}".format(self._config_file)) def evaluate(self, system_id=1, rouge_args=None): """ Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string. """ self.write_config(system_id=system_id) options = self.__get_options(rouge_args) command = [self._bin_path] + options self.log.info( "Running ROUGE with command {}".format(" ".join(command))) rouge_output = check_output(command).decode("UTF-8") return rouge_output def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None): """ Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string. """ if split_sentences: self.split_sentences() self.__write_summaries() rouge_output = self.evaluate(system_id, rouge_args) return rouge_output def output_to_dict(self, output): """ Convert the ROUGE output into python dictionary for further processing. """ # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) " r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)") results = {} for line in output.split("\n"): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace("-", '_') key = "{}_{}".format(rouge_type, measure) results[key] = float(result) results["{}_cb".format(key)] = float(conf_begin) results["{}_ce".format(key)] = float(conf_end) return results ################################################################### # Private methods def __set_rouge_dir(self, home_dir=None): """ Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths. """ if not home_dir: self._home_dir = self.__get_rouge_home_dir_from_settings() else: self._home_dir = home_dir self.save_home_dir() self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl') self.data_dir = os.path.join(self._home_dir, 'data') if not os.path.exists(self._bin_path): raise Exception( "ROUGE binary not found at {}. 
Please set the " "correct path by running pyrouge_set_rouge_path " "/path/to/rouge/home.".format(self._bin_path)) def __get_rouge_home_dir_from_settings(self): config = ConfigParser() with open(self._settings_file) as f: if hasattr(config, "read_file"): config.read_file(f) else: # use deprecated python 2.x method config.readfp(f) rouge_home_dir = config.get('pyrouge settings', 'home_dir') return rouge_home_dir @staticmethod def __get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames): """ ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries. """ peer_elems = "<P ID=\"{id}\">{name}</P>".format( id=system_id, name=system_filename) model_elems = ["<M ID=\"{id}\">{name}</M>".format( id=chr(65 + i), name=name) for i, name in enumerate(model_filenames)] model_elems = "\n\t\t\t".join(model_elems) eval_string = """ <EVAL ID="{task_id}"> <MODEL-ROOT>{model_root}</MODEL-ROOT> <PEER-ROOT>{peer_root}</PEER-ROOT> <INPUT-FORMAT TYPE="SEE"> </INPUT-FORMAT> <PEERS> {peer_elems} </PEERS> <MODELS> {model_elems} </MODELS> </EVAL> """.format( task_id=task_id, model_root=model_dir, model_elems=model_elems, peer_root=system_dir, peer_elems=peer_elems) return eval_string def __process_summaries(self, process_func): """ Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders. """ temp_dir = mkdtemp(dir=self.temp_dir) new_system_dir = os.path.join(temp_dir, "system") os.mkdir(new_system_dir) new_model_dir = os.path.join(temp_dir, "model") os.mkdir(new_model_dir) self.log.info( "Processing summaries. Saving system files to {} and " "model files to {}.".format(new_system_dir, new_model_dir)) process_func(self._system_dir, new_system_dir) process_func(self._model_dir, new_model_dir) self._system_dir = new_system_dir self._model_dir = new_model_dir def __write_summaries(self): self.log.info("Writing summaries.") self.__process_summaries(self.convert_summaries_to_rouge_format) @staticmethod def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern): pattern = re.compile(model_filenames_pattern.replace('#ID#', id)) model_filenames = [ f for f in os.listdir(model_dir) if pattern.match(f)] if not model_filenames: raise Exception( "Could not find any model summaries for the system" " summary with ID {}. Specified model filename pattern was: " "{}".format(id, model_filenames_pattern)) return model_filenames def __get_options(self, rouge_args=None): """ Get supplied command line arguments for ROUGE or use default ones. """ if self.args: options = self.args.split() elif rouge_args: options = rouge_args.split() else: options = [ '-e', self._data_dir, '-c', 95, # '-2', # '-1', # '-U', '-m', # '-v', '-r', 1000, '-n', 2, # '-w', 1.2, '-a', ] options = list(map(str, options)) options = self.__add_config_option(options) return options def __create_dir_property(self, dir_name, docstring): """ Generate getter and setter for a directory property. 
""" property_name = "{}_dir".format(dir_name) private_name = "_" + property_name setattr(self, private_name, None) def fget(self): return getattr(self, private_name) def fset(self, path): verify_dir(path, dir_name) setattr(self, private_name, path) p = property(fget=fget, fset=fset, doc=docstring) setattr(self.__class__, property_name, p) def __set_dir_properties(self): """ Automatically generate the properties for directories. """ directories = [ ("home", "The ROUGE home directory."), ("data", "The path of the ROUGE 'data' directory."), ("system", "Path of the directory containing system summaries."), ("model", "Path of the directory containing model summaries."), ] for (dirname, docstring) in directories: self.__create_dir_property(dirname, docstring) def __clean_rouge_args(self, rouge_args): """ Remove enclosing quotation marks, if any. """ if not rouge_args: return quot_mark_pattern = re.compile('"(.+)"') match = quot_mark_pattern.match(rouge_args) if match: cleaned_args = match.group(1) return cleaned_args else: return rouge_args def __add_config_option(self, options): return options + [self._config_file] def __get_config_path(self): if platform.system() == "Windows": parent_dir = os.getenv("APPDATA") config_dir_name = "pyrouge" elif os.name == "posix": parent_dir = os.path.expanduser("~") config_dir_name = ".pyrouge" else: parent_dir = os.path.dirname(__file__) config_dir_name = "" config_dir = os.path.join(parent_dir, config_dir_name) if not os.path.exists(config_dir): os.makedirs(config_dir) return os.path.join(config_dir, 'settings.ini') if __name__ == "__main__": import argparse from utils.argparsers import rouge_path_parser parser = argparse.ArgumentParser(parents=[rouge_path_parser]) args = parser.parse_args() rouge = Rouge155(args.rouge_home) rouge.save_home_dir()
data2vec_vision-main
s2s-ft/evaluations/bs_pyrouge.py
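# --- Illustrative sketch (added for clarity; not one of the repository files) ---
# Rouge155.output_to_dict in s2s-ft/evaluations/bs_pyrouge.py turns ROUGE-1.5.5's text
# report into a dictionary by matching lines such as
#   "1 ROUGE-2 Average_F: 0.18290 (95%-conf.int. 0.17746 - 0.18835)"
# with a regular expression. A minimal standalone version of that parsing step:
import re

ROUGE_LINE = re.compile(
    r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
    r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")

MEASURES = {"Average_R": "recall", "Average_P": "precision", "Average_F": "f_score"}


def parse_rouge_line(line):
    match = ROUGE_LINE.match(line)
    if not match:
        return None
    _, rouge_type, measure, value, conf_lo, conf_hi = match.groups()
    key = "{}_{}".format(rouge_type.lower().replace("-", "_"), MEASURES[measure])
    return key, float(value), (float(conf_lo), float(conf_hi))


# -> ('rouge_2_f_score', 0.1829, (0.17746, 0.18835))
print(parse_rouge_line("1 ROUGE-2 Average_F: 0.18290 (95%-conf.int. 0.17746 - 0.18835)"))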
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np # pip install py-rouge import rouge import time import tempfile import shutil # pip install pyrouge from evaluations.bs_pyrouge import Rouge155 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--gold", type=str, help="Gold output file.") parser.add_argument("--pred", type=str, help="Input prediction file.") parser.add_argument("--split", type=str, default="", help="Data split (train/dev/test).") parser.add_argument("--save_best", action='store_true', help="Save best epoch.") parser.add_argument("--only_eval_best", action='store_true', help="Only evaluate best epoch.") parser.add_argument("--trunc_len", type=int, default=60, help="Truncate line by the maximum length.") parser.add_argument("--duplicate_rate", type=float, default=0.7, help="If the duplicat rate (compared with history) is large, we can discard the current sentence.") default_process_count = max(1, cpu_count() - 1) parser.add_argument("--processes", type=int, default=default_process_count, help="Number of processes to use (default %(default)s)") parser.add_argument("--perl", action='store_true', help="Using the perl script.") parser.add_argument('--lazy_eval', action='store_true', help="Skip evaluation if the .rouge file exists.") args = parser.parse_args() SPECIAL_TOKEN = ["[UNK]", "[PAD]", "[CLS]", "[MASK]"] evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def rouge_results_to_str(results_dict): return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format( results_dict["rouge_1_f_score"] * 100, results_dict["rouge_2_f_score"] * 100, results_dict["rouge_l_f_score"] * 100, results_dict["rouge_1_recall"] * 100, results_dict["rouge_2_recall"] * 100, results_dict["rouge_l_recall"] * 100 ) def count_tokens(tokens): counter = {} for t in tokens: if t in counter.keys(): counter[t] += 1 else: counter[t] = 1 return counter def get_f1(text_a, 
text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5) _tok_dict = {"(": "-LRB-", ")": "-RRB-", "[": "-LSB-", "]": "-RSB-", "{": "-LCB-", "}": "-RCB-"} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def remove_duplicate(l_list, duplicate_rate): tk_list = [l.lower().split() for l in l_list] r_list = [] history_set = set() for i, w_list in enumerate(tk_list): w_set = set(w_list) if len(w_set & history_set)/len(w_set) <= duplicate_rate: r_list.append(l_list[i]) history_set |= w_set return r_list def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip().replace(" <S_SEP> ", '\n') gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] for sentence in l.strip().split("[X_SEP]"): sentence = fix_tokenization(sentence) sentence = sentence.replace("(", " -LRB- ").replace(")", " -RRB- ") sentence = sentence.replace("[", " -LSB- ").replace("]", " -RSB- ") while " " in sentence: sentence = sentence.replace(" ", " ") if any(get_f1(sentence, s) > 1.0 for s in buf): continue s_len = len(sentence.split()) if s_len <= 4: continue buf.append(sentence) if args.duplicate_rate and args.duplicate_rate < 1: buf = remove_duplicate(buf, args.duplicate_rate) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.replace('\n', ' [X_SEP] ').strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores def main(): if args.perl: eval_fn_list = list(glob.glob(args.pred)) else: eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not( args.lazy_eval and Path(eval_fn+".rouge").exists())] eval_fn_list = list(filter(lambda fn: not(fn.endswith( '.post') or fn.endswith('.rouge')), eval_fn_list)) if args.only_eval_best: best_epoch_dict = {} for dir_path in set(Path(fn).parent for fn in eval_fn_list): fn_save = os.path.join(dir_path, 'save_best.dev') if Path(fn_save).exists(): with open(fn_save, 'r') as f_in: __, o_name, __ = f_in.read().strip().split('\n') epoch = o_name.split('.')[1] best_epoch_dict[dir_path] = epoch new_eval_fn_list = [] for fn in eval_fn_list: dir_path = Path(fn).parent if dir_path in best_epoch_dict: if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]: new_eval_fn_list.append(fn) eval_fn_list = new_eval_fn_list logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list)) num_pool = 
min(args.processes, len(eval_fn_list)) p = Pool(num_pool) r_list = p.imap_unordered(process_eval, eval_fn_list) r_list = sorted([(fn, scores) for fn, scores in r_list], key=lambda x: x[0]) rg2_dict = {} for fn, scores in r_list: print(fn) if args.perl: print(rouge_results_to_str(scores)) else: rg2_dict[fn] = scores['rouge-2']['f'] print( "ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f'])) with open(fn+".rouge", 'w') as f_out: f_out.write(json.dumps( {'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']})) p.close() p.join() if args.save_best: # find best results group_dict = {} for k, v in rg2_dict.items(): d_name, o_name = Path(k).parent, Path(k).name if (d_name not in group_dict) or (v > group_dict[d_name][1]): group_dict[d_name] = (o_name, v) # compare and save the best result for k, v in group_dict.items(): fn = os.path.join(k, 'save_best.'+args.split) o_name_s, rst_s = v should_save = True if Path(fn).exists(): with open(fn, 'r') as f_in: rst_f = float(f_in.read().strip().split('\n')[-1]) if rst_s <= rst_f: should_save = False if should_save: with open(fn, 'w') as f_out: f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s)) if __name__ == "__main__": main()
data2vec_vision-main
s2s-ft/evaluations/eval_for_cnndm.py
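The script above filters near-duplicate sentences by token overlap and then truncates the kept sentences to a token budget before ROUGE scoring. Below is a minimal, self-contained sketch of those two post-processing steps on toy inputs; the remove_duplicate logic is copied from the script, while the toy sentences and the 8-token budget are illustrative assumptions rather than values from the repository.

def remove_duplicate(l_list, duplicate_rate):
    # Keep a sentence only if at most `duplicate_rate` of its unique tokens
    # were already seen in previously kept sentences.
    tk_list = [l.lower().split() for l in l_list]
    r_list, history_set = [], set()
    for i, w_list in enumerate(tk_list):
        w_set = set(w_list)
        if len(w_set & history_set) / len(w_set) <= duplicate_rate:
            r_list.append(l_list[i])
        history_set |= w_set
    return r_list


if __name__ == "__main__":
    buf = [
        "the cat sat on the mat",
        "the cat sat on the mat again",      # mostly repeats the first sentence
        "a completely different third line",
    ]
    kept = remove_duplicate(buf, duplicate_rate=0.7)

    # Truncate the kept sentences to an overall budget, mirroring --trunc_len.
    num_left, trunc_list = 8, []
    for bit in kept:
        tk_list = bit.split()
        n = min(len(tk_list), num_left)
        trunc_list.append(" ".join(tk_list[:n]))
        num_left -= n
        if num_left <= 0:
            break

    print(kept)        # second sentence is dropped as a near-duplicate
    print(trunc_list)  # remaining sentences cut to at most 8 tokens in total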
from __future__ import absolute_import, division, print_function, unicode_literals import logging from transformers import BertConfig, RobertaConfig from s2s_ft.configuration_unilm import UnilmConfig logger = logging.getLogger(__name__) class BertForSeq2SeqConfig(BertConfig): def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1, **kwargs): super(BertForSeq2SeqConfig, self).__init__(**kwargs) self.label_smoothing = label_smoothing self.source_type_id = source_type_id self.target_type_id = target_type_id @classmethod def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None): required_keys = [ "vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads", "hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob", "max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps"] kwargs = {} for key in required_keys: assert hasattr(config, key) kwargs[key] = getattr(config, key) kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"] if isinstance(config, RobertaConfig): kwargs["type_vocab_size"] = 0 kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2 additional_keys = [ "source_type_id", "target_type_id" ] for key in additional_keys: if hasattr(config, key): kwargs[key] = getattr(config, key) if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings: kwargs["max_position_embeddings"] = max_position_embeddings logger.info(" ** Change max position embeddings to %d ** " % max_position_embeddings) return cls(label_smoothing=label_smoothing, **kwargs)
data2vec_vision-main
s2s-ft/s2s_ft/config.py
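A small usage sketch for the adapter above: derive a sequence-to-sequence fine-tuning config from an existing encoder config. It assumes that transformers and the local s2s_ft package are importable; the encoder hyper-parameters below are generic BERT-base style values chosen for illustration.

from transformers import BertConfig

from s2s_ft.config import BertForSeq2SeqConfig

# Start from a plain encoder configuration (illustrative sizes).
base_config = BertConfig(
    vocab_size=30522,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
)

# Copy the required fields and attach the seq2seq-specific settings
# (label smoothing plus source/target segment type ids).
s2s_config = BertForSeq2SeqConfig.from_exist_config(base_config, label_smoothing=0.1)
print(s2s_config.label_smoothing, s2s_config.source_type_id, s2s_config.target_type_id)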
# coding=utf-8 # The MIT License (MIT) # Copyright (c) Microsoft Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ MiniLM model configuration """ from __future__ import absolute_import, division, print_function, unicode_literals import json import logging import sys from io import open from transformers.configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'minilm-l12-h384-uncased': "https://unilm.blob.core.windows.net/ckpt/minilm-l12-h384-uncased-config.json", } class MinilmConfig(PretrainedConfig): r""" :class:`~transformers.MinilmConfig` is the configuration class to store the configuration of a `MinilmModel`. Arguments: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `MiniLMModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `MiniLMModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps: The epsilon used by LayerNorm. 
""" pretrained_config_archive_map = MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP def __init__(self, vocab_size=28996, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=6, initializer_range=0.02, layer_norm_eps=1e-12, source_type_id=0, target_type_id=1, **kwargs): super(MinilmConfig, self).__init__(**kwargs) if isinstance(vocab_size, str) or (sys.version_info[0] == 2 and isinstance(vocab_size, unicode)): with open(vocab_size, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size, int): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.source_type_id = source_type_id self.target_type_id = target_type_id else: raise ValueError("First argument must be either a vocabulary size (int)" " or the path to a pretrained model config file (str)")
data2vec_vision-main
s2s-ft/s2s_ft/configuration_minilm.py
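For completeness, a minimal instantiation sketch for the configuration class above, assuming the local s2s_ft package is importable. The sizes follow the l12-h384 naming used in the archive map but are spelled out here only as illustrative values, not read from the published checkpoint.

from s2s_ft.configuration_minilm import MinilmConfig

# Build a config from keyword arguments rather than from a JSON file path.
config = MinilmConfig(
    vocab_size=30522,
    hidden_size=384,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=1536,
)

# type_vocab_size defaults to 6 and the source/target segment ids to 0/1,
# which is what distinguishes this config from a vanilla BERT one.
print(config.hidden_size, config.type_vocab_size, config.source_type_id, config.target_type_id)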
# coding=utf-8 """PyTorch BERT model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss import torch.nn.functional as F from transformers.file_utils import cached_path from torch.nn.modules.loss import _Loss class LabelSmoothingLoss(_Loss): """ With label smoothing, KL-divergence between q_{smoothed ground truth prob.}(w) and p_{prob. computed by model}(w) is minimized. """ def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'): assert 0.0 < label_smoothing <= 1.0 self.ignore_index = ignore_index super(LabelSmoothingLoss, self).__init__( size_average=size_average, reduce=reduce, reduction=reduction) assert label_smoothing > 0 assert tgt_vocab_size > 0 smoothing_value = label_smoothing / (tgt_vocab_size - 2) one_hot = torch.full((tgt_vocab_size,), smoothing_value) one_hot[self.ignore_index] = 0 self.register_buffer('one_hot', one_hot.unsqueeze(0)) self.confidence = 1.0 - label_smoothing self.tgt_vocab_size = tgt_vocab_size def forward(self, output, target): """ output (FloatTensor): batch_size * num_pos * n_classes target (LongTensor): batch_size * num_pos """ assert self.tgt_vocab_size == output.size(2) batch_size, num_pos = target.size(0), target.size(1) output = output.view(-1, self.tgt_vocab_size) target = target.view(-1) model_prob = self.one_hot.repeat(target.size(0), 1) model_prob.scatter_(1, target.unsqueeze(1), self.confidence) model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0) return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2) logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", 'unilm-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased.bin", 'unilm-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased.bin", 'unilm1-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased.bin", 'unilm1-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased.bin", 'unilm1.2-base-uncased': "https://unilm.blob.core.windows.net/ckpt/unilm1.2-base-uncased.bin" } CONFIG_NAME = 'config.json' WEIGHTS_NAME = 'pytorch_model.bin' def gelu(x): """Implementation of the gelu activation function. 
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, relax_projection=0, new_pos_ids=False, initializer_range=0.02, task_idx=None, fp32_embedding=False, ffn_type=0, label_smoothing=None, num_qkv=0, seg_emb=False, source_type_id=0, target_type_id=1, no_segment_embedding=False, **kwargs): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" if isinstance(vocab_size_or_config_json_file, str): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.relax_projection = relax_projection self.new_pos_ids = new_pos_ids self.initializer_range = initializer_range self.task_idx = task_idx self.fp32_embedding = fp32_embedding self.ffn_type = ffn_type self.label_smoothing = label_smoothing self.num_qkv = num_qkv self.seg_emb = seg_emb self.no_segment_embedding = no_segment_embedding self.source_type_id = source_type_id self.target_type_id = target_type_id if type_vocab_size == 0: self.no_segment_embedding = True else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" try: from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm except ImportError: print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-5): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding( config.vocab_size, config.hidden_size) if config.no_segment_embedding: self.token_type_embeddings = None else: self.token_type_embeddings = nn.Embedding( config.type_vocab_size, config.hidden_size) if hasattr(config, 'fp32_embedding'): self.fp32_embedding = config.fp32_embedding else: self.fp32_embedding = False if hasattr(config, 'new_pos_ids') and config.new_pos_ids: self.num_pos_emb = 4 else: self.num_pos_emb = 1 self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size * self.num_pos_emb) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None): seq_length = input_ids.size(1) if position_ids is None: position_ids = torch.arange( seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) if self.num_pos_emb > 1: num_batch = position_embeddings.size(0) num_pos = position_embeddings.size(1) position_embeddings = position_embeddings.view( num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :] embeddings = words_embeddings + position_embeddings if self.token_type_embeddings is not None: embeddings = embeddings + self.token_type_embeddings(token_type_ids) if self.fp32_embedding: embeddings = embeddings.half() embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int( config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size if hasattr(config, 'num_qkv') and (config.num_qkv > 1): self.num_qkv = config.num_qkv else: self.num_qkv = 1 self.query = nn.Linear( config.hidden_size, self.all_head_size * self.num_qkv) self.key = nn.Linear(config.hidden_size, self.all_head_size * self.num_qkv) self.value = nn.Linear( config.hidden_size, self.all_head_size * self.num_qkv) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.uni_debug_flag = True if os.getenv( 'UNI_DEBUG_FLAG', '') else False if self.uni_debug_flag: self.register_buffer('debug_attention_probs', torch.zeros((512, 512))) if hasattr(config, 'seg_emb') and config.seg_emb: self.b_q_s = nn.Parameter(torch.zeros( 1, self.num_attention_heads, 1, self.attention_head_size)) self.seg_emb = nn.Embedding( config.type_vocab_size, self.all_head_size) else: self.b_q_s = None self.seg_emb = None def transpose_for_scores(self, x, mask_qkv=None): if self.num_qkv > 1: sz = x.size()[:-1] + (self.num_qkv, self.num_attention_heads, self.all_head_size) # (batch, pos, num_qkv, head, head_hid) x = x.view(*sz) if mask_qkv is None: x = x[:, :, 0, :, :] elif 
isinstance(mask_qkv, int): x = x[:, :, mask_qkv, :, :] else: # mask_qkv: (batch, pos) if mask_qkv.size(1) > sz[1]: mask_qkv = mask_qkv[:, :sz[1]] # -> x: (batch, pos, head, head_hid) x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand( sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2) else: sz = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) # (batch, pos, head, head_hid) x = x.view(*sz) # (batch, head, pos, head_hid) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask, history_states=None, mask_qkv=None, seg_ids=None, key_history=None, value_history=None, key_cache=None, value_cache=None, ): if history_states is None: mixed_query_layer = self.query(hidden_states) # possible issue: https://github.com/NVIDIA/apex/issues/131 mixed_key_layer = F.linear(hidden_states, self.key.weight) mixed_value_layer = self.value(hidden_states) else: x_states = torch.cat((history_states, hidden_states), dim=1) mixed_query_layer = self.query(hidden_states) # possible issue: https://github.com/NVIDIA/apex/issues/131 mixed_key_layer = F.linear(x_states, self.key.weight) mixed_value_layer = self.value(x_states) if key_cache is not None and isinstance(key_cache, list): key_cache.append(mixed_key_layer) mixed_key_layer = torch.cat(key_cache, dim=1) if value_cache is not None and isinstance(value_cache, list): value_cache.append(mixed_value_layer) mixed_value_layer = torch.cat(value_cache, dim=1) query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv) key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv) value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv) if key_history is not None and not isinstance(key_history, list): key_layer = torch.cat((key_history, key_layer), dim=-2) value_layer = torch.cat((value_history, value_layer), dim=-2) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch, head, pos, pos) attention_scores = torch.matmul( query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2)) if self.seg_emb is not None: seg_rep = self.seg_emb(seg_ids) # (batch, pos, head, head_hid) seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size( 1), self.num_attention_heads, self.attention_head_size) qs = torch.einsum('bnih,bjnh->bnij', query_layer + self.b_q_s, seg_rep) attention_scores = attention_scores + qs # attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if self.uni_debug_flag: _pos = attention_probs.size(-1) self.debug_attention_probs[:_pos, :_pos].copy_( attention_probs[0].mean(0).view(_pos, _pos)) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
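        # What follows: dropout is applied to whole attention rows, the
        # probability-weighted sum over the value vectors is computed, and the
        # heads are merged back into a [batch, seq_len, all_head_size] tensor.
        # When key_history / value_history lists are passed in, the freshly
        # computed key and value layers are appended to them for reuse by
        # later incremental decoding steps.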
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[ :-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) if isinstance(key_history, list): key_history.append(key_layer) if isinstance(value_history, list): value_history.append(value_layer) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask, history_states=None, mask_qkv=None, seg_ids=None, key_history=None, value_history=None): self_output = self.self( input_tensor, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history) attention_output = self.output(self_output, input_tensor) return attention_output class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TransformerFFN(nn.Module): def __init__(self, config): super(TransformerFFN, self).__init__() self.ffn_type = config.ffn_type assert self.ffn_type in (1, 2) if self.ffn_type in (1, 2): self.wx0 = nn.Linear(config.hidden_size, config.hidden_size) if self.ffn_type in (2,): self.wx1 = nn.Linear(config.hidden_size, config.hidden_size) if self.ffn_type in (1, 2): self.output = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, x): if self.ffn_type in (1, 2): x0 = self.wx0(x) if self.ffn_type == 1: x1 = x elif self.ffn_type == 2: x1 = self.wx1(x) out = self.output(x0 * x1) out = self.dropout(out) out = self.LayerNorm(out + x) return out class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.ffn_type = config.ffn_type if self.ffn_type: self.ffn = TransformerFFN(config) else: self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, 
hidden_states, attention_mask, history_states=None, mask_qkv=None, seg_ids=None, key_history=None, value_history=None): attention_output = self.attention( hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history) if self.ffn_type: layer_output = self.ffn(attention_output) else: intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() layer = BertLayer(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, seg_ids=None, key_history=None, value_history=None): # history embedding and encoded layer must be simultanously given assert (prev_embedding is None) == (prev_encoded_layers is None) all_encoder_layers = [] if (prev_embedding is not None) and (prev_encoded_layers is not None): history_states = prev_embedding for i, layer_module in enumerate(self.layer): hidden_states = layer_module( hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if prev_encoded_layers is not None: history_states = prev_encoded_layers[i] else: for i, layer_module in enumerate(self.layer): set_key = None if isinstance(key_history, list): set_key = key_history if len(key_history) < len(self.layer) else key_history[i] set_value = None if isinstance(value_history, list): set_value = value_history if len(key_history) < len(self.layer) else value_history[i] hidden_states = layer_module( hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=set_key, value_history=set_value) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if not output_all_encoded_layers: all_encoder_layers.append(hidden_states) return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.transform_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act hid_size = config.hidden_size if hasattr(config, 'relax_projection') and (config.relax_projection > 1): hid_size *= config.relax_projection self.dense = nn.Linear(config.hidden_size, hid_size) self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros( bert_model_embedding_weights.size(0))) if hasattr(config, 'relax_projection') and (config.relax_projection > 1): self.relax_projection = config.relax_projection else: self.relax_projection = 0 self.fp32_embedding = config.fp32_embedding def convert_to_type(tensor): if self.fp32_embedding: return tensor.half() else: return tensor self.type_converter = convert_to_type self.converted = False def forward(self, hidden_states, task_idx=None): if not self.converted: self.converted = True if self.fp32_embedding: self.transform.half() hidden_states = self.transform(self.type_converter(hidden_states)) if self.relax_projection > 1: num_batch = hidden_states.size(0) num_pos = hidden_states.size(1) # (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid) hidden_states = hidden_states.view( num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :] if self.fp32_embedding: hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter( self.decoder.weight), self.type_converter(self.bias)) else: hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead( config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights, num_labels=2): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead( config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, num_labels) def forward(self, sequence_output, pooled_output, task_idx=None): prediction_scores = self.predictions(sequence_output, 
task_idx) if pooled_output is None: seq_relationship_score = None else: seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class PreTrainedBertModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(PreTrainedBertModel, self).__init__() if not isinstance(config, BertConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `BertConfig`. " "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) self.config = config def init_bert_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) # module.weight.data.copy_(torch.Tensor( # truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range)) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-base-multilingual` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ logger.info("Model config {}".format(config)) # clean the arguments in kwargs for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx', 'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing', 'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb', 'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'): if arg_clean in kwargs: del kwargs[arg_clean] # Instantiate model. 
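        # Then, when no explicit state_dict is given, load pytorch_model.bin
        # from the given directory, rename legacy 'gamma'/'beta' parameter
        # names to 'weight'/'bias', copy tensors into the model module by
        # module (using a 'bert.' key prefix when the model itself has no
        # .bert submodule), and log any missing or unexpected keys.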
model = cls(config, *inputs, **kwargs) if state_dict is None: weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME) state_dict = torch.load(weights_path) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get( prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='' if hasattr(model, 'bert') else 'bert.') model.missing_keys = missing_keys if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: logger.info('\n'.join(error_msgs)) return model class BertModel(PreTrainedBertModel): """BERT model ("Bidirectional Embedding Representations from a Transformer"). Params: config: a BertConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. Outputs: Tuple of (encoded_layers, pooled_output) `encoded_layers`: controled by `output_all_encoded_layers` argument: - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLF`) to train on the Next-Sentence task (see BERT's paper). 
``` """ def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def rescale_some_parameters(self): for layer_id, layer in enumerate(self.encoder.layer): layer.attention.output.dense.weight.data.div_( math.sqrt(2.0 * (layer_id + 1))) layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1))) def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. if attention_mask.dim() == 2: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) elif attention_mask.dim() == 3: extended_attention_mask = attention_mask.unsqueeze(1) else: raise NotImplementedError # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to( dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None): extended_attention_mask = self.get_extended_attention_mask( input_ids, token_type_ids, attention_mask) embedding_output = self.embeddings( input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, mask_qkv=mask_qkv, seg_ids=token_type_ids, key_history=key_history, value_history=value_history) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class BertModelIncr(BertModel): def __init__(self, config): super(BertModelIncr, self).__init__(config) def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None): extended_attention_mask = self.get_extended_attention_mask( input_ids, token_type_ids, attention_mask) embedding_output = self.embeddings( input_ids, token_type_ids, position_ids, task_idx=task_idx) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, seg_ids=token_type_ids) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return embedding_output, encoded_layers, pooled_output class 
BertForPreTraining(PreTrainedBertModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, mask_qkv=None, task_idx=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) prediction_scores, seq_relationship_score = self.cls( sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct( seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss else: return prediction_scores, seq_relationship_score class BertPreTrainingPairTransform(nn.Module): def __init__(self, config): super(BertPreTrainingPairTransform, self).__init__() self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size) self.transform_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act # self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) def forward(self, pair_x, pair_y): hidden_states = torch.cat([pair_x, pair_y], dim=-1) hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) # hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertPreTrainingPairRel(nn.Module): def __init__(self, config, num_rel=0): super(BertPreTrainingPairRel, self).__init__() self.R_xy = BertPreTrainingPairTransform(config) self.rel_emb = nn.Embedding(num_rel, config.hidden_size) def forward(self, pair_x, pair_y, pair_r, pair_pos_neg_mask): # (batch, num_pair, hidden) xy = self.R_xy(pair_x, pair_y) r = self.rel_emb(pair_r) _batch, _num_pair, _hidden = xy.size() pair_score = (xy * r).sum(-1) # torch.bmm(xy.view(-1, 1, _hidden),r.view(-1, _hidden, 1)).view(_batch, _num_pair) # .mul_(-1.0): objective to loss return F.logsigmoid(pair_score * pair_pos_neg_mask.type_as(pair_score)).mul_(-1.0) class BertForPreTrainingLossMask(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config, num_labels=2, num_rel=0, num_sentlvl_labels=0, no_nsp=False): super(BertForPreTrainingLossMask, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels) self.num_sentlvl_labels = num_sentlvl_labels self.cls2 = None if self.num_sentlvl_labels > 0: self.secondary_pred_proj = nn.Embedding( num_sentlvl_labels, config.hidden_size) self.cls2 = BertPreTrainingHeads( config, self.secondary_pred_proj.weight, num_labels=num_sentlvl_labels) self.crit_mask_lm = 
nn.CrossEntropyLoss(reduction='none') if no_nsp: self.crit_next_sent = None else: self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1) self.num_labels = num_labels self.num_rel = num_rel if self.num_rel > 0: self.crit_pair_rel = BertPreTrainingPairRel( config, num_rel=num_rel) if hasattr(config, 'label_smoothing') and config.label_smoothing: self.crit_mask_lm_smoothed = LabelSmoothingLoss( config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none') else: self.crit_mask_lm_smoothed = None self.apply(self.init_bert_weights) self.bert.rescale_some_parameters() def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, masked_pos=None, masked_weights=None, task_idx=None, pair_x=None, pair_x_mask=None, pair_y=None, pair_y_mask=None, pair_r=None, pair_pos_neg_mask=None, pair_loss_mask=None, masked_pos_2=None, masked_weights_2=None, masked_labels_2=None, num_tokens_a=None, num_tokens_b=None, mask_qkv=None): if token_type_ids is None and attention_mask is None: task_0 = (task_idx == 0) task_1 = (task_idx == 1) task_2 = (task_idx == 2) task_3 = (task_idx == 3) sequence_length = input_ids.shape[-1] index_matrix = torch.arange(sequence_length).view( 1, sequence_length).to(input_ids.device) num_tokens = num_tokens_a + num_tokens_b base_mask = (index_matrix < num_tokens.view(-1, 1) ).type_as(input_ids) segment_a_mask = ( index_matrix < num_tokens_a.view(-1, 1)).type_as(input_ids) token_type_ids = ( task_idx + 1 + task_3.type_as(task_idx)).view(-1, 1) * base_mask token_type_ids = token_type_ids - segment_a_mask * \ (task_0 | task_3).type_as(segment_a_mask).view(-1, 1) index_matrix = index_matrix.view(1, 1, sequence_length) index_matrix_t = index_matrix.view(1, sequence_length, 1) tril = index_matrix <= index_matrix_t attention_mask_task_0 = ( index_matrix < num_tokens.view(-1, 1, 1)) & ( index_matrix_t < num_tokens.view(-1, 1, 1)) attention_mask_task_1 = tril & attention_mask_task_0 attention_mask_task_2 = torch.transpose( tril, dim0=-2, dim1=-1) & attention_mask_task_0 attention_mask_task_3 = ( (index_matrix < num_tokens_a.view(-1, 1, 1)) | tril) & attention_mask_task_0 attention_mask = (attention_mask_task_0 & task_0.view(-1, 1, 1)) | \ (attention_mask_task_1 & task_1.view(-1, 1, 1)) | \ (attention_mask_task_2 & task_2.view(-1, 1, 1)) | \ (attention_mask_task_3 & task_3.view(-1, 1, 1)) attention_mask = attention_mask.type_as(input_ids) sequence_output, pooled_output = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) def gather_seq_out_by_pos(seq, pos): return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1))) def gather_seq_out_by_pos_average(seq, pos, mask): # pos/mask: (batch, num_pair, max_token_num) batch_size, max_token_num = pos.size(0), pos.size(-1) # (batch, num_pair, max_token_num, seq.size(-1)) pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze( 2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1)) # (batch, num_pair, seq.size(-1)) mask = mask.type_as(pos_vec) pos_vec_masked_sum = ( pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2) return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum) def loss_mask_and_normalize(loss, mask): mask = mask.type_as(loss) loss = loss * mask denominator = torch.sum(mask) + 1e-5 return (loss / denominator).sum() if masked_lm_labels is None: if masked_pos is None: prediction_scores, seq_relationship_score = 
self.cls( sequence_output, pooled_output, task_idx=task_idx) else: sequence_output_masked = gather_seq_out_by_pos( sequence_output, masked_pos) prediction_scores, seq_relationship_score = self.cls( sequence_output_masked, pooled_output, task_idx=task_idx) return prediction_scores, seq_relationship_score # masked lm sequence_output_masked = gather_seq_out_by_pos( sequence_output, masked_pos) prediction_scores_masked, seq_relationship_score = self.cls( sequence_output_masked, pooled_output, task_idx=task_idx) if self.crit_mask_lm_smoothed: masked_lm_loss = self.crit_mask_lm_smoothed( F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels) else: masked_lm_loss = self.crit_mask_lm( prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels) masked_lm_loss = loss_mask_and_normalize( masked_lm_loss.float(), masked_weights) # next sentence if self.crit_next_sent is None or next_sentence_label is None: next_sentence_loss = 0.0 else: next_sentence_loss = self.crit_next_sent( seq_relationship_score.view(-1, self.num_labels).float(), next_sentence_label.view(-1)) if self.cls2 is not None and masked_pos_2 is not None: sequence_output_masked_2 = gather_seq_out_by_pos( sequence_output, masked_pos_2) prediction_scores_masked_2, _ = self.cls2( sequence_output_masked_2, None) masked_lm_loss_2 = self.crit_mask_lm( prediction_scores_masked_2.transpose(1, 2).float(), masked_labels_2) masked_lm_loss_2 = loss_mask_and_normalize( masked_lm_loss_2.float(), masked_weights_2) masked_lm_loss = masked_lm_loss + masked_lm_loss_2 if pair_x is None or pair_y is None or pair_r is None or pair_pos_neg_mask is None or pair_loss_mask is None: return masked_lm_loss, next_sentence_loss # pair and relation if pair_x_mask is None or pair_y_mask is None: pair_x_output_masked = gather_seq_out_by_pos( sequence_output, pair_x) pair_y_output_masked = gather_seq_out_by_pos( sequence_output, pair_y) else: pair_x_output_masked = gather_seq_out_by_pos_average( sequence_output, pair_x, pair_x_mask) pair_y_output_masked = gather_seq_out_by_pos_average( sequence_output, pair_y, pair_y_mask) pair_loss = self.crit_pair_rel( pair_x_output_masked, pair_y_output_masked, pair_r, pair_pos_neg_mask) pair_loss = loss_mask_and_normalize( pair_loss.float(), pair_loss_mask) return masked_lm_loss, next_sentence_loss, pair_loss class BertForSeq2SeqFinetuningWithPseudoMask(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config): super(BertForSeq2SeqFinetuningWithPseudoMask, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight, num_labels=2) if hasattr(config, 'label_smoothing') and config.label_smoothing: self.crit_mask_lm_smoothed = LabelSmoothingLoss( config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none') self.crit_mask_lm = None else: self.crit_mask_lm_smoothed = None self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none') @staticmethod def create_mask(token_ids, num_tokens): base_position_matrix = torch.arange( 0, token_ids.size(1), dtype=token_ids.dtype, device=token_ids.device).view(1, -1) return (base_position_matrix < num_tokens.view(-1, 1)).to(token_ids.device).type_as(token_ids) def create_target_mask(self, target_ids, num_target_tokens): max_target_len = target_ids.size(1) target_mask = self.create_mask(target_ids, num_target_tokens) target_pos_matrix = torch.arange( 0, max_target_len, dtype=target_ids.dtype, device=target_ids.device).view(1, -1) triangle_attention_mask = \ 
target_pos_matrix.view(1, max_target_len, 1) >= target_pos_matrix.view(1, 1, max_target_len) triangle_attention_mask = triangle_attention_mask.type_as(target_mask) diagonal_attention_mask = \ target_pos_matrix.view(1, max_target_len, 1) == target_pos_matrix.view(1, 1, max_target_len) diagonal_attention_mask = diagonal_attention_mask.type_as(target_mask) golden_attention_mask = torch.cat((triangle_attention_mask, torch.zeros_like(triangle_attention_mask)), dim=-1) pseudo_attention_mask = torch.cat( (triangle_attention_mask - diagonal_attention_mask, diagonal_attention_mask), dim=-1) return target_mask, torch.cat((golden_attention_mask, pseudo_attention_mask), dim=1) def forward(self, source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, eval_mode=False, fixed_num_tokens=None): source_mask = self.create_mask(source_ids, num_source_tokens) key_history = [] value_history = [] source_sequence_output, pooled_output = self.bert( source_ids, torch.zeros_like(source_ids), source_mask, output_all_encoded_layers=False, key_history=key_history, value_history=value_history) target_mask, extend_target_mask = self.create_target_mask(target_ids, num_target_tokens) extend_target_mask = extend_target_mask.expand(source_ids.size(0), -1, -1) mask_matrix = torch.cat( (source_mask.unsqueeze(1).expand(-1, target_ids.size(1) * 2, -1), extend_target_mask), dim=-1) target_input_sequence = torch.cat((target_ids, pseudo_ids), dim=-1) target_segment_ids = torch.ones_like(target_ids) target_segment_ids = torch.cat((target_segment_ids, target_segment_ids), dim=-1) target_position_ids = torch.arange(target_ids.size(1), dtype=torch.long, device=target_ids.device) target_position_ids = target_position_ids.view(1, -1) + num_source_tokens.view(-1, 1) target_position_ids = torch.cat((target_position_ids, target_position_ids), dim=-1) target_position_ids = target_position_ids * torch.cat((target_mask, target_mask), dim=-1) target_sequence_output, target_pooled_output = self.bert( target_input_sequence, target_segment_ids, mask_matrix, output_all_encoded_layers=False, key_history=key_history, value_history=value_history, position_ids=target_position_ids) def loss_mask_and_normalize(loss, mask, fixed_mask_tokens=None): mask = mask.type_as(loss) loss = loss * mask if fixed_mask_tokens: denominator = fixed_mask_tokens else: denominator = torch.sum(mask) + 1e-5 return (loss / denominator).sum() prediction_scores_masked, seq_relationship_score = self.cls( target_sequence_output[:, target_ids.size(1):, :], target_pooled_output) if eval_mode: return F.softmax(prediction_scores_masked, dim=-1).gather(index=target_ids.unsqueeze(-1), dim=-1).squeeze( -1), target_mask if self.crit_mask_lm_smoothed: masked_lm_loss = self.crit_mask_lm_smoothed( F.log_softmax(prediction_scores_masked.float(), dim=-1), target_ids) else: masked_lm_loss = self.crit_mask_lm( prediction_scores_masked.transpose(1, 2).float(), target_ids) pseudo_lm_loss = loss_mask_and_normalize( masked_lm_loss.float(), target_mask, fixed_mask_tokens=fixed_num_tokens) return pseudo_lm_loss class BertForExtractiveSummarization(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config): super(BertForExtractiveSummarization, self).__init__(config) self.bert = BertModel(config) self.secondary_pred_proj = nn.Embedding(2, config.hidden_size) self.cls2 = BertPreTrainingHeads( config, self.secondary_pred_proj.weight, num_labels=2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, 
                masked_pos_2=None, masked_weights_2=None, task_idx=None, mask_qkv=None):
        sequence_output, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask,
            output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx)

        def gather_seq_out_by_pos(seq, pos):
            return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))

        sequence_output_masked_2 = gather_seq_out_by_pos(
            sequence_output, masked_pos_2)
        prediction_scores_masked_2, _ = self.cls2(
            sequence_output_masked_2, None, task_idx=task_idx)
        predicted_probs = torch.nn.functional.softmax(
            prediction_scores_masked_2, dim=-1)
        return predicted_probs, masked_pos_2, masked_weights_2


class BertForSeq2SeqDecoder(PreTrainedBertModel):
    """refer to BertForPreTraining"""

    def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
                 search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
                 forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3,
                 min_len=0, mode="s2s", pos_shift=False):
        super(BertForSeq2SeqDecoder, self).__init__(config)
        self.bert = BertModelIncr(config)
        self.cls = BertPreTrainingHeads(
            config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
        self.apply(self.init_bert_weights)
        self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
        self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
        self.mask_word_id = mask_word_id
        self.num_labels = num_labels
        self.num_rel = num_rel
        if self.num_rel > 0:
            self.crit_pair_rel = BertPreTrainingPairRel(
                config, num_rel=num_rel)
        self.search_beam_size = search_beam_size
        self.length_penalty = length_penalty
        self.eos_id = eos_id
        self.sos_id = sos_id
        self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
        self.forbid_ignore_set = forbid_ignore_set
        self.ngram_size = ngram_size
        self.min_len = min_len
        assert mode in ("s2s", "l2r")
        self.mode = mode
        self.pos_shift = pos_shift

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask,
                task_idx=None, mask_qkv=None):
        if self.search_beam_size > 1:
            return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask,
                                    task_idx=task_idx, mask_qkv=mask_qkv)

        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        if self.pos_shift:
            sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id)

        # greedy incremental decoding: only the new positions are fed to the model,
        # previous embeddings and per-layer hidden states are cached and reused
        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]

            if self.pos_shift:
                if next_pos == input_length:
                    x_input_ids = torch.cat((curr_ids, sos_ids), dim=1)
                    start_pos = 0
                else:
                    x_input_ids = curr_ids
                    start_pos = next_pos
            else:
                start_pos = next_pos - curr_length
                x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)

            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding,
                          prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv)

            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores, _ = self.cls(
                last_hidden, None, task_idx=task_idx)
            _, max_ids = torch.max(prediction_scores, dim=-1)
            output_ids.append(max_ids)

            if self.pos_shift:
                if prev_embedding is None:
                    prev_embedding = new_embedding
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
                        prev_encoded_layers, new_encoded_layers)]
            else:
                # drop the trailing [MASK] position from the cache
                if prev_embedding is None:
                    prev_embedding = new_embedding[:, :-1, :]
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [x[:, :-1, :] for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                           for x in zip(prev_encoded_layers, new_encoded_layers)]
            curr_ids = max_ids
            next_pos += 1

        return torch.cat(output_ids, dim=1)

    def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask,
                    task_idx=None, mask_qkv=None):
        input_shape = list(input_ids.size())
        batch_size = input_shape[0]
        input_length = input_shape[1]
        output_shape = list(token_type_ids.size())
        output_length = output_shape[1]

        output_ids = []
        prev_embedding = None
        prev_encoded_layers = None
        curr_ids = input_ids
        mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
        next_pos = input_length
        if self.pos_shift:
            sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id)

        K = self.search_beam_size

        total_scores = []
        beam_masks = []
        step_ids = []
        step_back_ptrs = []
        partial_seqs = []
        forbid_word_mask = None
        buf_matrix = None

        while next_pos < output_length:
            curr_length = list(curr_ids.size())[1]

            if self.pos_shift:
                if next_pos == input_length:
                    x_input_ids = torch.cat((curr_ids, sos_ids), dim=1)
                    start_pos = 0
                else:
                    x_input_ids = curr_ids
                    start_pos = next_pos
            else:
                start_pos = next_pos - curr_length
                x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)

            curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
            curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
            curr_position_ids = position_ids[:, start_pos:next_pos + 1]
            new_embedding, new_encoded_layers, _ = \
                self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
                          output_all_encoded_layers=True, prev_embedding=prev_embedding,
                          prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv)

            last_hidden = new_encoded_layers[-1][:, -1:, :]
            prediction_scores, _ = self.cls(
                last_hidden, None, task_idx=task_idx)
            log_scores = torch.nn.functional.log_softmax(
                prediction_scores, dim=-1)
            if forbid_word_mask is not None:
                log_scores += (forbid_word_mask * -10000.0)
            if self.min_len and (next_pos - input_length + 1 <= self.min_len):
                log_scores[:, :, self.eos_id].fill_(-10000.0)

            kk_scores, kk_ids = torch.topk(log_scores, k=K)
            if len(total_scores) == 0:
                # first step: expand each batch item into K beams
                k_ids = torch.reshape(kk_ids, [batch_size, K])
                back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
                k_scores = torch.reshape(kk_scores, [batch_size, K])
            else:
                # later steps: score the K*K candidates and keep the best K per batch item
                last_eos = torch.reshape(
                    beam_masks[-1], [batch_size * K, 1, 1])
                last_seq_scores = torch.reshape(
                    total_scores[-1], [batch_size * K, 1, 1])
                kk_scores += last_eos * (-10000.0) + last_seq_scores
                kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
                k_scores, k_ids = torch.topk(kk_scores, k=K)
                back_ptrs = torch.floor_divide(k_ids, K)
                kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
                k_ids = torch.gather(kk_ids, 1, k_ids)
            step_back_ptrs.append(back_ptrs)
            step_ids.append(k_ids)
            beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
            total_scores.append(k_scores)

            def first_expand(x):
                # tile a (batch, ...) tensor K times into (batch * K, ...)
                input_shape = list(x.size())
                expanded_shape = input_shape[:1] + [1] + input_shape[1:]
                x = torch.reshape(x, expanded_shape)
                repeat_count = [1, K] + [1] * (len(input_shape) - 1)
                x = x.repeat(*repeat_count)
                x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
                return x

            def select_beam_items(x, ids):
                # reorder cached (batch * K, ...) states according to beam back-pointers
                id_shape = list(ids.size())
                id_rank = len(id_shape)
                assert len(id_shape) == 2
                x_shape = list(x.size())
                x = torch.reshape(x, [batch_size, K] + x_shape[1:])
                x_rank = len(x_shape) + 1
                assert x_rank >= 2
                if id_rank < x_rank:
                    ids = torch.reshape(
                        ids, id_shape + [1] * (x_rank - id_rank))
                    ids = ids.expand(id_shape + x_shape[1:])
                y = torch.gather(x, 1, ids)
                y = torch.reshape(y, x_shape)
                return y

            is_first = (prev_embedding is None)

            if self.pos_shift:
                if prev_embedding is None:
                    prev_embedding = first_expand(new_embedding)
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding), dim=1)
                    prev_embedding = select_beam_items(
                        prev_embedding, back_ptrs)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [first_expand(x) for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
                        prev_encoded_layers, new_encoded_layers)]
                    prev_encoded_layers = [select_beam_items(
                        x, back_ptrs) for x in prev_encoded_layers]
            else:
                if prev_embedding is None:
                    prev_embedding = first_expand(new_embedding[:, :-1, :])
                else:
                    prev_embedding = torch.cat(
                        (prev_embedding, new_embedding[:, :-1, :]), dim=1)
                    prev_embedding = select_beam_items(
                        prev_embedding, back_ptrs)
                if prev_encoded_layers is None:
                    prev_encoded_layers = [first_expand(x[:, :-1, :]) for x in new_encoded_layers]
                else:
                    prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
                                           for x in zip(prev_encoded_layers, new_encoded_layers)]
                    prev_encoded_layers = [select_beam_items(
                        x, back_ptrs) for x in prev_encoded_layers]

            curr_ids = torch.reshape(k_ids, [batch_size * K, 1])

            if is_first:
                token_type_ids = first_expand(token_type_ids)
                position_ids = first_expand(position_ids)
                attention_mask = first_expand(attention_mask)
                mask_ids = first_expand(mask_ids)
                if mask_qkv is not None:
                    mask_qkv = first_expand(mask_qkv)

            if self.forbid_duplicate_ngrams:
                wids = step_ids[-1].tolist()
                ptrs = step_back_ptrs[-1].tolist()
                if is_first:
                    partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            partial_seqs.append([wids[b][k]])
                else:
                    new_partial_seqs = []
                    for b in range(batch_size):
                        for k in range(K):
                            new_partial_seqs.append(
                                partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
                    partial_seqs = new_partial_seqs

                def get_dup_ngram_candidates(seq, n):
                    # words that would complete an n-gram already present in seq
                    cands = set()
                    if len(seq) < n:
                        return []
                    tail = seq[-(n - 1):]
                    if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
                        return []
                    for i in range(len(seq) - (n - 1)):
                        mismatch = False
                        for j in range(n - 1):
                            if tail[j] != seq[i + j]:
                                mismatch = True
                                break
                        if (not mismatch) and not (
                                self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
                            cands.add(seq[i + n - 1])
                    return list(sorted(cands))

                if len(partial_seqs[0]) >= self.ngram_size:
                    dup_cands = []
                    for seq in partial_seqs:
                        dup_cands.append(
                            get_dup_ngram_candidates(seq, self.ngram_size))
                    if max(len(x) for x in dup_cands) > 0:
                        if buf_matrix is None:
                            vocab_size = list(log_scores.size())[-1]
                            buf_matrix = np.zeros(
                                (batch_size * K, vocab_size), dtype=float)
                        else:
                            buf_matrix.fill(0)
                        for bk, cands in enumerate(dup_cands):
                            for i, wid in enumerate(cands):
                                buf_matrix[bk, wid] = 1.0
                        forbid_word_mask = torch.tensor(
                            buf_matrix, dtype=log_scores.dtype)
                        forbid_word_mask = torch.reshape(
                            forbid_word_mask, [batch_size * K, 1, vocab_size]).to(input_ids.device)
                    else:
                        forbid_word_mask = None
            next_pos += 1

        # [(batch, beam)]
        total_scores = [x.tolist() for x in total_scores]
        step_ids = [x.tolist() for x in step_ids]
        step_back_ptrs = [x.tolist() for x in step_back_ptrs]

        # back tracking
        traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
        for b in range(batch_size):
            # [(beam,)]
            scores = [x[b] for x in total_scores]
            wids_list = [x[b] for x in step_ids]
            ptrs = [x[b] for x in step_back_ptrs]
            traces['scores'].append(scores)
            traces['wids'].append(wids_list)
            traces['ptrs'].append(ptrs)
            # first we need to find the eos frame where all symbols are eos
            # any frames after the eos frame are invalid
            last_frame_id = len(scores) - 1
            for i, wids in enumerate(wids_list):
                if all(wid == self.eos_id for wid in wids):
                    last_frame_id = i
                    break
            max_score = -math.inf
            frame_id = -1
            pos_in_frame = -1
            for fid in range(last_frame_id + 1):
                for i, wid in enumerate(wids_list[fid]):
                    if wid == self.eos_id or fid == last_frame_id:
                        s = scores[fid][i]
                        if self.length_penalty > 0:
                            s /= math.pow((5 + fid + 1) / 6.0, self.length_penalty)
                        if s > max_score:
                            max_score = s
                            frame_id = fid
                            pos_in_frame = i
            if frame_id == -1:
                traces['pred_seq'].append([0])
            else:
                # follow the back-pointers from the best frame to recover the token sequence
                seq = [wids_list[frame_id][pos_in_frame]]
                for fid in range(frame_id, 0, -1):
                    pos_in_frame = ptrs[fid][pos_in_frame]
                    seq.append(wids_list[fid - 1][pos_in_frame])
                seq.reverse()
                traces['pred_seq'].append(seq)

        def _pad_sequence(sequences, max_len, padding_value=0):
            trailing_dims = sequences[0].size()[1:]
            out_dims = (len(sequences), max_len) + trailing_dims
            out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
            for i, tensor in enumerate(sequences):
                length = tensor.size(0)
                # use index notation to prevent duplicate references to the tensor
                out_tensor[i, :length, ...] = tensor
            return out_tensor

        # convert to tensors for DataParallel
        for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
            ts_list = traces[k]
            if not isinstance(ts_list[0], torch.Tensor):
                dt = torch.float if k == 'scores' else torch.long
                ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
            traces[k] = _pad_sequence(
                ts_list, output_length, padding_value=0).to(input_ids.device)

        return traces
data2vec_vision-main
s2s-ft/s2s_ft/modeling_decoding.py