Columns: python_code (string, 0–679k characters), repo_name (string, 9–41 characters), file_path (string, 6–149 characters).
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Lamb optimizer."""

import torch
from torch.optim import Optimizer


class Lamb(Optimizer):
    r"""Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning:
    Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.

    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
                 weight_decay=0, adam=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients.')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)

                # Paper v3 does not use debiasing.
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
                # Apply bias to lr to avoid broadcast.
                step_size = group['lr']  # * math.sqrt(bias_correction2) / bias_correction1

                weight_norm = p.data.norm(p=2).clamp_(0, 10)

                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(group['weight_decay'], p.data)

                adam_norm = adam_step.norm(p=2)
                if weight_norm == 0.0 or adam_norm == 0.0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / (adam_norm + group['eps'])
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    trust_ratio = 1

                p.data.add_(-step_size * trust_ratio, adam_step)

        return loss


@torch.jit.script
def lamb_kernel(param, grad, exp_avg, exp_avg_sq, beta1: float,
                beta2: float, step_size: float, eps: float, weight_decay: float):
    exp_avg = exp_avg * beta1 + (1 - beta1) * grad
    exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad)

    adam_step = exp_avg / (exp_avg_sq.sqrt() + eps)
    adam_step = adam_step + weight_decay * param

    weight_norm = param.norm(p=2).clamp(0, 10)
    adam_norm = adam_step.norm(p=2)

    trust_ratio = weight_norm / (adam_norm + eps)
    trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio
    trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio
    trust_ratio = trust_ratio.float()

    param = param - step_size * trust_ratio * adam_step
    return param, exp_avg, exp_avg_sq


class JITLamb(Optimizer):
    r"""Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning:
    Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.

    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
                 weight_decay=0, adam=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.adam = adam
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients.')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                step_size = group['lr']

                param, exp_avg, exp_avg_sq = lamb_kernel(p.data, grad, exp_avg,
                                                         exp_avg_sq, beta1,
                                                         beta2, step_size,
                                                         group['eps'],
                                                         group['weight_decay'],
                                                         )
                state['exp_avg'] = exp_avg
                state['exp_avg_sq'] = exp_avg_sq
                p.data = param

        return loss
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/lamb.py
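A minimal usage sketch of the Lamb optimizer above, assuming the module import path follows the file layout (bart/lamb.py); the toy model, data, and hyperparameters are illustrative, not values from the BART training recipe. Note that the optimizer relies on the pre-1.5 PyTorch overloads add_(scalar, tensor) and addcmul_(scalar, t1, t2), so on recent PyTorch it may emit deprecation warnings or require updating those calls.

# Illustrative only: toy model and hyperparameters are assumptions.
import torch
from bart.lamb import Lamb  # assumed import path, matching bart/lamb.py

model = torch.nn.Linear(16, 4)
optimizer = Lamb(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
                 eps=1e-6, weight_decay=0.01)

x = torch.randn(8, 16)
target = torch.randn(8, 4)
loss_fn = torch.nn.MSELoss()

for _ in range(3):
    optimizer.zero_grad()
    loss = loss_fn(model(x), target)
    loss.backward()
    # Each step scales the Adam update per parameter tensor by the trust
    # ratio ||w|| / (||adam_step|| + eps), as implemented in Lamb.step().
    optimizer.step()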
from . import configuration
from . import tokenization
from . import modeling
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/__init__.py
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration base class and utilities.""" import copy import json import logging import os from typing import Any, Dict, Tuple from utils.file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url logger = logging.getLogger(__name__) class PretrainedConfig(object): r""" Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations. Note: A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights. It only affects the model's configuration. Class attributes (overridden by derived classes) - **model_type** (:obj:`str`): An identifier for the model type, serialized into the JSON file, and used to recreate the correct object in :class:`~transformers.AutoConfig`. Args: output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the model should return all hidden-states. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the model should returns all attentions. use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should return the last key/values attentions (not used by all models). return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether the model is used as an encoder/decoder or not. is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether the model is used as decoder or not (in which case it's used as an encoder). add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether cross-attention layers should be added to the model. Note, this option is only relevant for models that can be used as decoder models within the `:class:~transformers.EncoderDecoderModel` class, which consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``. tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`) Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names. prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`): Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of heads to prune in said layer. For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. xla_device (:obj:`bool`, `optional`): A flag to indicate if TPU are available or not. 
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`): The chunk size of all feed forward layers in the residual attention blocks. A chunk size of :obj:`0` means that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes :obj:`n` < sequence_length embeddings at a time. For more information on feed forward chunking, see `How does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ . Parameters for sequence generation - **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by default in the :obj:`generate` method of the model. - **min_length** (:obj:`int`, `optional`, defaults to 10) -- Minimum length that will be used by default in the :obj:`generate` method of the model. - **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in the :obj:`generate` method of the model. Whether or not to use sampling ; use greedy decoding otherwise. - **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in the :obj:`generate` method of the model. Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. - **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be used by default in the :obj:`generate` method of the model. 1 means no beam search. - **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to module the next token probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly positive. - **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in the :obj:`generate` method of the model. - **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the :obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation. - **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty that will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty. - **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that will be used by default in the :obj:`generate` method of the model. - **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default in the :obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of that size can only occur once. - **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be generated that will be used by default in the :obj:`generate` method of the model. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`. - **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed returned sequences for each element in the batch that will be used by default in the :obj:`generate` method of the model. Parameters for fine-tuning tasks - **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the model pretrained weights. 
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint. - **id2label** (:obj:`List[str]`, `optional`) -- A map from index (for instance prediction index, or target index) to label. - **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model. - **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model, typically for a classification task. - **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for the current task. Parameters linked to the tokenizer - **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each text before calling the model. - **bos_token_id** (:obj:`int`, `optional`)) -- The id of the `beginning-of-stream` token. - **pad_token_id** (:obj:`int`, `optional`)) -- The id of the `padding` token. - **eos_token_id** (:obj:`int`, `optional`)) -- The id of the `end-of-stream` token. - **decoder_start_token_id** (:obj:`int`, `optional`)) -- If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. PyTorch specific parameters - **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be used with Torchscript. TensorFlow specific parameters - **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models). """ model_type: str = "" def __init__(self, **kwargs): # Attributes with defaults self.return_dict = kwargs.pop("return_dict", False) self.output_hidden_states = kwargs.pop("output_hidden_states", False) self.output_attentions = kwargs.pop("output_attentions", False) self.use_cache = kwargs.pop("use_cache", True) # Not used by all models self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models self.use_bfloat16 = kwargs.pop("use_bfloat16", False) self.pruned_heads = kwargs.pop("pruned_heads", {}) # Is decoder is used in encoder-decoder models to differentiate encoder from decoder self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False) self.is_decoder = kwargs.pop("is_decoder", False) self.add_cross_attention = kwargs.pop("add_cross_attention", False) self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False) # Parameters for sequence generation self.max_length = kwargs.pop("max_length", 20) self.min_length = kwargs.pop("min_length", 0) self.do_sample = kwargs.pop("do_sample", False) self.early_stopping = kwargs.pop("early_stopping", False) self.num_beams = kwargs.pop("num_beams", 1) self.temperature = kwargs.pop("temperature", 1.0) self.top_k = kwargs.pop("top_k", 50) self.top_p = kwargs.pop("top_p", 1.0) self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0) self.length_penalty = kwargs.pop("length_penalty", 1.0) self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0) self.bad_words_ids = kwargs.pop("bad_words_ids", None) self.num_return_sequences = kwargs.pop("num_return_sequences", 1) self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0) # Fine-tuning task arguments self.architectures = kwargs.pop("architectures", None) self.finetuning_task = kwargs.pop("finetuning_task", None) self.id2label = kwargs.pop("id2label", None) self.label2id = kwargs.pop("label2id", None) if self.id2label is not None: kwargs.pop("num_labels", None) 
self.id2label = dict((int(key), value) for key, value in self.id2label.items()) # Keys are always strings in JSON so convert ids to int here. else: self.num_labels = kwargs.pop("num_labels", 2) # Tokenizer arguments TODO: eventually tokenizer and models should share the same config self.prefix = kwargs.pop("prefix", None) self.bos_token_id = kwargs.pop("bos_token_id", None) self.pad_token_id = kwargs.pop("pad_token_id", None) self.eos_token_id = kwargs.pop("eos_token_id", None) self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None) self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forwar", 0) # task specific arguments self.task_specific_params = kwargs.pop("task_specific_params", None) # TPU arguments self.xla_device = kwargs.pop("xla_device", None) # Additional attributes without default values for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error("Can't set {} with value {} for {}".format(key, value, self)) raise err @property def use_return_dict(self) -> bool: """ :obj:`bool`: Whether or not return :class:`~transformers.file_utils.ModelOutput` instead of tuples. """ # If torchscript is set, force `return_dict=False` to avoid jit errors return self.return_dict and not self.torchscript @property def num_labels(self) -> int: """ :obj:`int`: The number of labels for classification models. """ return len(self.id2label) @num_labels.setter def num_labels(self, num_labels: int): self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)} self.label2id = dict(zip(self.id2label.values(), self.id2label.keys())) def save_pretrained(self, save_directory: str): """ Save a configuration object to the directory ``save_directory``, so that it can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method. Args: save_directory (:obj:`str`): Directory where the configuration JSON file will be saved (will be created if it does not exist). """ if os.path.isfile(save_directory): raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory)) os.makedirs(save_directory, exist_ok=True) # If we save using the predefined names, we can load using `from_pretrained` output_config_file = os.path.join(save_directory, CONFIG_NAME) self.to_json_file(output_config_file, use_diff=True) logger.info("Configuration saved in {}".format(output_config_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs) -> "PretrainedConfig": r""" Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pretrained model configuration. Args: pretrained_model_name_or_path (:obj:`str`): This can be either: - the `shortcut name` of a pretrained model configuration to load from cache or download, e.g., ``bert-base-uncased``. - the `identifier name` of a pretrained model configuration that was uploaded to our S3 by any user, e.g., ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g., ``./my_model_directory/``. - a path or url to a saved configuration JSON `file`, e.g., ``./my_model_directory/configuration.json``. cache_dir (:obj:`str`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. 
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Wheter or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (:obj:`Dict[str, str]`, `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`): If :obj:`False`, then this function returns just the final configuration object. If :obj:`True`, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of ``kwargs`` which has not been used to update ``config`` and is otherwise ignored. kwargs (:obj:`Dict[str, Any]`, `optional`): The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the ``return_unused_kwargs`` keyword parameter. Returns: :class:`PretrainedConfig`: The configuration object instantiated from this pretrained model. Examples:: # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a # derived class: BertConfig config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json') config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False) assert config.output_attention == True config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False, return_unused_kwargs=True) assert config.output_attention == True assert unused_kwargs == {'foo': False} """ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) return cls.from_dict(config_dict, **kwargs) @classmethod def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a :class:`~transformers.PretrainedConfig` using ``from_dict``. Parameters: pretrained_model_name_or_path (:obj:`str`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. Returns: :obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object. 
""" cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) if os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): config_file = pretrained_model_name_or_path else: config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False) try: # Load from URL or cache if already cached resolved_config_file = cached_path( config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) # Load config dict if resolved_config_file is None: raise EnvironmentError config_dict = cls._dict_from_json_file(resolved_config_file) except EnvironmentError: msg = ( f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n" ) raise EnvironmentError(msg) except json.JSONDecodeError: msg = ( "Couldn't reach server at '{}' to download configuration file or " "configuration file is not a valid JSON file. " "Please check network or file content here: {}.".format(config_file, resolved_config_file) ) raise EnvironmentError(msg) if resolved_config_file == config_file: logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file)) return config_dict, kwargs @classmethod def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig": """ Instantiates a :class:`~transformers.PretrainedConfig` from a Python dictionary of parameters. Args: config_dict (:obj:`Dict[str, Any]`): Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict` method. kwargs (:obj:`Dict[str, Any]`): Additional parameters from which to initialize the configuration object. Returns: :class:`PretrainedConfig`: The configuration object instantiated from those parameters. """ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) config = cls(**config_dict) if hasattr(config, "pruned_heads"): config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items()) # Update config with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info("Model config %s", str(config)) if return_unused_kwargs: return config, kwargs else: return config @classmethod def from_json_file(cls, json_file: str) -> "PretrainedConfig": """ Instantiates a :class:`~transformers.PretrainedConfig` from the path to a JSON file of parameters. Args: json_file (:obj:`str`): Path to the JSON file containing the parameters. Returns: :class:`PretrainedConfig`: The configuration object instantiated from that JSON file. 
""" config_dict = cls._dict_from_json_file(json_file) return cls(**config_dict) @classmethod def _dict_from_json_file(cls, json_file: str): with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return json.loads(text) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return "{} {}".format(self.__class__.__name__, self.to_json_string()) def to_diff_dict(self) -> Dict[str, Any]: """ Removes all attributes from config which correspond to the default config attributes for better readability and serializes to a Python dictionary. Returns: :obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, """ config_dict = self.to_dict() # get the default config dict default_config_dict = PretrainedConfig().to_dict() serializable_config_dict = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if key not in default_config_dict or value != default_config_dict[key]: serializable_config_dict[key] = value return serializable_config_dict def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: :obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) if hasattr(self.__class__, "model_type"): output["model_type"] = self.__class__.model_type return output def to_json_string(self, use_diff: bool = True) -> str: """ Serializes this instance to a JSON string. Args: use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to ``True``, only the difference between the config instance and the default ``PretrainedConfig()`` is serialized to JSON string. Returns: :obj:`str`: String containing all the attributes that make up this configuration instance in JSON format. """ if use_diff is True: config_dict = self.to_diff_dict() else: config_dict = self.to_dict() return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: str, use_diff: bool = True): """ Save this instance to a JSON file. Args: json_file_path (:obj:`str`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to ``True``, only the difference between the config instance and the default ``PretrainedConfig()`` is serialized to JSON file. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string(use_diff=use_diff)) def update(self, config_dict: Dict[str, Any]): """ Updates attributes of this class with attributes from ``config_dict``. Args: config_dict (:obj:`Dict[str, Any]`): Dictionary of attributes that shall be updated for this class. """ for key, value in config_dict.items(): setattr(self, key, value)
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/configuration/configuration_utils.py
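A short sketch of the save/load round trip that PretrainedConfig above implements. MyConfig, the "my_model" model type, the n_layers field, and the output directory are made-up names for demonstration; writing to disk assumes a writable working directory.

# Hypothetical subclass used only to exercise the base-class machinery.
from bart.configuration.configuration_utils import PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my_model"

    def __init__(self, n_layers=2, **kwargs):
        super().__init__(**kwargs)
        self.n_layers = n_layers

config = MyConfig(n_layers=4, num_labels=3)
config.save_pretrained("./my_model_directory")       # writes config.json
reloaded = MyConfig.from_pretrained("./my_model_directory")
assert reloaded.n_layers == 4
assert reloaded.num_labels == 3

# to_diff_dict() keeps only attributes that differ from the defaults, which
# is what save_pretrained() serializes when use_diff=True.
print(config.to_diff_dict())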
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/configuration/__init__.py
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Fairseq Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BART configuration """ import logging from bart.configuration.configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) BART_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/bart-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-base/config.json", "facebook/bart-large": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/config.json", "facebook/bart-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-mnli/config.json", "facebook/bart-large-cnn": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json", "facebook/bart-large-xsum": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-xsum/config.json", "facebook/mbart-large-en-ro": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/mbart-large-en-ro/config.json", "yjernite/bart_eli5": "https://s3.amazonaws.com/models.huggingface.co/bert/yjernite/bart_eli5/config.json", } BART_CONFIG_ARGS_DOC = r""" Args: vocab_size (:obj:`int`, optional, defaults to 50265): defines the different tokens that can be represented by `inputs_ids` passed to the forward method. d_model (:obj:`int`, optional, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (:obj:`int`, optional, defaults to 12): Number of encoder layers, 16 for pegasus, 6 for bart-base and marian decoder_layers (:obj:`int`, optional, defaults to 12): Number of decoder layers, 16 for pegasus, 6 for bart-base and marian encoder_attention_heads (:obj:`int`, optional, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (:obj:`int`, optional, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (:obj:`int`, optional, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in decoder. encoder_ffn_dim (:obj:`int`, optional, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in decoder. activation_function (:obj:`str` or :obj:`function`, optional, defaults to "gelu"): The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported. dropout (:obj:`float`, optional, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, optional, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (:obj:`float`, optional, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (:obj:`float`, optional, defaults to 0.0): The dropout ratio for classifier. 
max_position_embeddings (:obj:`int`, optional, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (:obj:`float`, optional, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_bias_logits (:obj:`int`, optional, defaults to False): True for marian only. normalize_before (:obj:`bool`, optional, defaults to False): Call layernorm before attention ops. True for pegasus, mbart. False for bart. FIXME: marian? normalize_embedding (:obj:`bool`, optional, defaults to True): Call layernorm after embeddings. Only True for Bart. static_position_embeddings (:obj:`bool`, optional, defaults to False): Don't learn positional embeddings, use sinusoidal. True for marian, pegasus. add_final_layer_norm (:obj:`bool`, optional, defaults to False): Why not add another layernorm? scale_embedding (:obj:`bool`, optional, defaults to False): Scale embeddings by diving by sqrt(d_model). eos_token_id (:obj:`int`, optional, defaults to 2) End of stream token id. pad_token_id (:obj:`int`, optional, defaults to 1) Padding token id. bos_token_id (:obj:`int`, optional, defaults to 0) Beginning of stream token id. encoder_layerdrop: (:obj:`float`, optional, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. decoder_layerdrop: (:obj:`float`, optional, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. num_labels: (:obj:`int`, optional, defaults to 2): for SequenceClassification is_encoder_decoder (:obj:`int`, optional, defaults to True): True force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force BOS token to be generated at step 1 (after ``decoder_start_token_id``), only true for `bart-large-cnn`. """ class BartConfig(PretrainedConfig): r""" Configuration class for Bart. Parameters are renamed from the fairseq implementation """ model_type = "bart" def __init__( self, activation_dropout=0.0, activation_function="gelu", vocab_size=50265, d_model=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, max_position_embeddings=1024, init_std=0.02, classifier_dropout=0.0, num_labels=3, is_encoder_decoder=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, normalize_before=False, add_final_layer_norm=False, scale_embedding=False, normalize_embedding=True, static_position_embeddings=False, add_bias_logits=False, force_bos_token_to_be_generated=False, attention_bias=True, **common_kwargs ): r""" :class:`~transformers.BartConfig` is the configuration class for `BartModel`. 
Examples:: >>> from transformers import BartConfig, BartModel >>> config = BartConfig.from_pretrained('facebook/bart-large') >>> model = BartModel(config) """ if "hidden_size" in common_kwargs: raise ValueError("hidden size is called d_model") super().__init__( num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **common_kwargs, ) self.vocab_size = vocab_size self.d_model = d_model # encoder_embed_dim and decoder_embed_dim self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function # Params introduced for Mbart self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.normalize_embedding = normalize_embedding # True for mbart, False otherwise self.normalize_before = normalize_before # combo of fairseq's encoder_ and decoder_normalize_before self.add_final_layer_norm = add_final_layer_norm # Params introduced for Marian self.add_bias_logits = add_bias_logits self.static_position_embeddings = static_position_embeddings # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout # Classifier stuff self.classif_dropout = classifier_dropout self.force_bos_token_to_be_generated = force_bos_token_to_be_generated self.attention_bias = attention_bias @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model def is_valid_mbart(self) -> bool: """Is the configuration aligned with the MBART paper.""" if self.normalize_before and self.add_final_layer_norm and self.scale_embedding: return True if self.normalize_before or self.add_final_layer_norm or self.scale_embedding: logger.info("This configuration is a mixture of MBART and BART settings") return False
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/configuration/configuration_bart.py
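A small sketch of how the BartConfig defined above is typically constructed; the values shown are the class defaults rather than a specific released checkpoint.

from bart.configuration.configuration_bart import BartConfig

config = BartConfig(d_model=1024, encoder_layers=12, decoder_layers=12)
assert config.hidden_size == 1024           # property alias for d_model
assert config.num_attention_heads == 16     # alias for encoder_attention_heads
assert config.num_hidden_layers == 12       # mirrors encoder_layers

# Passing the usual BERT-style name is rejected on purpose:
try:
    BartConfig(hidden_size=768)
except ValueError as err:
    print(err)                              # "hidden size is called d_model"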
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2010, The T5 Authors and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ T5 model configuration """ from bart.configuration.configuration_utils import PretrainedConfig from utils import logging logger = logging.get_logger(__name__) T5_PRETRAINED_CONFIG_ARCHIVE_MAP = { "t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-small-config.json", "t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-base-config.json", "t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-large-config.json", "t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-3b-config.json", "t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-11b-config.json", } class T5Config(PretrainedConfig): r""" :class:`~transformers.T5Config` is the configuration class to store the configuration of a `T5Model`. Arguments: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `T5Model`. d_model: Size of the encoder layers and the pooler layer. `d_model` can also accesed via the property `hidden_size`. num_layers: Number of hidden layers in the Transformer encoder. `num_layers` can also be accessed via the property `num_hidden_layers`. d_kv: Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`. d_ff: Size of the intermediate feed forward layer in each `T5Block`. num_heads: Number of attention heads for each attention layer in the Transformer encoder. `num_heads` can also be accessed via the property `num_attention_heads`. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. n_positions: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). `n_positions` can also be accessed via the property `max_position_embeddings`. type_vocab_size: The vocabulary size of the `token_type_ids` passed into `T5Model`. initializer_factor: A factor for initializing all weight matrices (should be kept to 1.0, used for initialization testing). layer_norm_eps: The epsilon used by LayerNorm. 
""" model_type = "t5" def __init__( self, vocab_size=32128, n_positions=512, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_heads=8, relative_attention_num_buckets=32, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, is_encoder_decoder=True, pad_token_id=0, eos_token_id=1, **kwargs ): super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, ) self.vocab_size = vocab_size self.n_positions = n_positions self.d_model = d_model self.d_kv = d_kv self.d_ff = d_ff self.num_layers = num_layers self.num_heads = num_heads self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.initializer_factor = initializer_factor @property def max_position_embeddings(self): return self.n_positions @property def hidden_size(self): return self.d_model @property def num_attention_heads(self): return self.num_heads @property def num_hidden_layers(self): return self.num_layers
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/configuration/configuration_t5.py
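A brief sketch of the alias properties on T5Config above; the numbers are the defaults hard-coded in the class, not those of a particular t5-* checkpoint.

from bart.configuration.configuration_t5 import T5Config

config = T5Config()   # d_model=512, d_kv=64, num_layers=6, num_heads=8 by default
assert config.hidden_size == config.d_model
assert config.num_hidden_layers == config.num_layers
assert config.max_position_embeddings == config.n_positions   # 512
assert config.d_kv == config.d_model // config.num_heads      # 64 per head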
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional from utils.file_utils import add_start_docstrings from bart.tokenization.tokenization_utils import BatchEncoding from bart.tokenization.tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING from bart.tokenization.tokenization_xlm_roberta import XLMRobertaTokenizer from utils import logging logger = logging.get_logger(__name__) _all_mbart_models = ["facebook/mbart-large-en-ro", "facebook/mbart-large-cc25"] SPM_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/mbart-large-en-ro/sentence.bpe.model" FAIRSEQ_LANGUAGE_CODES = [ "ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", ] class MBartTokenizer(XLMRobertaTokenizer): """ This inherits from XLMRobertaTokenizer. ``prepare_seq2seq_batch`` should be used to encode inputs. Other tokenizer methods like ``encode`` do not work properly. The tokenization method is ``<tokens> <eos> <language code>`` for source language documents, and ``<language code> <tokens> <eos>``` for target language documents. Examples:: >>> from transformers import MBartTokenizer >>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro') >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> batch: dict = tokenizer.prepare_seq2seq_batch( ... example_english_phrase, src_lang="en_XX", tgt_lang="ro_RO", tgt_texts=expected_translation_romanian ... 
) """ vocab_files_names = {"vocab_file": "sentencepiece.bpe.model"} max_model_input_sizes = {m: 1024 for m in _all_mbart_models} pretrained_vocab_files_map = {"vocab_file": {m: SPM_URL for m in _all_mbart_models}} prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.sp_model_size = len(self.sp_model) self.lang_code_to_id = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES) } self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()} self.cur_lang_code = self.lang_code_to_id["en_XX"] self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} self._additional_special_tokens = list(self.lang_code_to_id.keys()) self.set_src_lang_special_tokens(kwargs.get("src_lang", "en_XX")) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` methods. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART sequence has the following format, where ``X`` represents the sequence: - ``input_ids`` (for encoder) ``X [eos, src_lang_code]`` - ``decoder_input_ids``: (for decoder) ``[tgt_lang_code] X [eos]`` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. 
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING) def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", max_length: Optional[int] = None, max_target_length: Optional[int] = None, truncation: bool = True, padding: str = "longest", return_tensors: str = "pt", **kwargs, ) -> BatchEncoding: """Prepare a batch that can be passed directly to an instance of MBartModel. Arguments: src_texts: (:obj:`list`): list of documents to summarize or source language texts src_lang: (:obj:`str`, `optional`, default='en_XX'): default en_XX (english), the language we are translating from tgt_texts: (:obj:`list`, `optional`): list of tgt language texts or summaries. tgt_lang: (:obj:`str`, `optional`, default='ro_RO'): default ro_RO (romanian), the language we are translating to max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). Return: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **labels** -- List of token ids for tgt_texts The full set of keys ``[input_ids, attention_mask, decoder_input_ids, labels]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ if max_length is None: max_length = self.max_len self.set_src_lang_special_tokens(src_lang) model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length self.set_tgt_lang_special_tokens(tgt_lang) labels = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=True, **kwargs, )["input_ids"] model_inputs["labels"] = labels self.set_src_lang_special_tokens(src_lang) # sets to src_lang return model_inputs def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, cur_lang_code].""" self.cur_lang_code = self.lang_code_to_id[src_lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. Prefix [tgt_lang_code], suffix =[eos].""" self.cur_lang_code = self.lang_code_to_id[lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_mbart.py
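A usage sketch adapted from the docstring above for MBartTokenizer.prepare_seq2seq_batch; it assumes the 'facebook/mbart-large-en-ro' files can be downloaded in your environment and that sentencepiece is installed.

from bart.tokenization.tokenization_mbart import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro")
src = ["UN Chief Says There Is No Military Solution in Syria"]
tgt = ["Şeful ONU declară că nu există o soluţie militară în Siria"]

batch = tokenizer.prepare_seq2seq_batch(
    src, src_lang="en_XX", tgt_texts=tgt, tgt_lang="ro_RO",
    return_tensors="pt",
)
# "labels" is only present because tgt_texts was passed; the language-specific
# special tokens are attached via set_src/tgt_lang_special_tokens().
print(batch["input_ids"].shape, batch["attention_mask"].shape, batch["labels"].shape)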
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for python tokenizers. For fast tokenizers (provided by HuggingFace's tokenizers library) see tokenization_utils_fast.py """ import itertools import logging import re import unicodedata from typing import Any, Dict, List, Optional, Tuple, Union, overload from utils.file_utils import add_end_docstrings from bart.tokenization.tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PaddingStrategy, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TensorType, TextInput, TextInputPair, TruncationStrategy, ) logger = logging.getLogger(__name__) def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `char` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def _is_end_of_word(text): """Checks whether the last character in text is one of a punctuation, control or whitespace character.""" last_char = text[-1] return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char)) def _is_start_of_word(text): """Checks whether the first character in text is one of a punctuation, control or whitespace character.""" first_char = text[0] return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char)) @add_end_docstrings(INIT_TOKENIZER_DOCSTRING, """ .. automethod:: __call__""") class PreTrainedTokenizer(PreTrainedTokenizerBase): """ Base class for all slow tokenizers. Inherits from :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase`. Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary. 
This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). """ def __init__(self, **kwargs): super().__init__(**kwargs) # Added tokens - We store this for both slow and fast tokenizers # until the serialization of Fast tokenizers is updated self.added_tokens_encoder: Dict[str, int] = {} self.added_tokens_decoder: Dict[int, str] = {} self.unique_no_split_tokens: List[str] = [] @property def is_fast(self) -> bool: return False @property def vocab_size(self) -> int: """ :obj:`int`: Size of the base vocabulary (without the added tokens). """ raise NotImplementedError def get_vocab(self) -> Dict[str, int]: """ Returns the vocabulary as a dictionary of token to index. :obj:`tokenizer.get_vocab()[token]` is equivalent to :obj:`tokenizer.convert_tokens_to_ids(token)` when :obj:`token` is in the vocab. Returns: :obj:`Dict[str, int]`: The vocabulary. """ raise NotImplementedError() def get_added_vocab(self) -> Dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Returns: :obj:`Dict[str, int]`: The added tokens. """ return self.added_tokens_encoder def __len__(self): """ Size of the full vocabulary with the added tokens. """ return self.vocab_size + len(self.added_tokens_encoder) def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. Args: new_tokens (:obj:`List[str]`or :obj:`List[tokenizers.AddedToken]`): Token(s) to add in vocabulary. A token is only added if it's not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the tokens should be added as special tokens. Returns: :obj:`int`: The number of tokens actually added to the vocabulary. Examples:: # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2']) print('We have added', num_added_toks, 'tokens') # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) """ new_tokens = [str(tok) for tok in new_tokens] tokens_to_add = [] for token in new_tokens: assert isinstance(token, str) if not special_tokens and self.init_kwargs.get("do_lower_case", False): token = token.lower() if ( token != self.unk_token and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token) and token not in tokens_to_add ): tokens_to_add.append(token) if self.verbose: logger.info("Adding %s to the vocabulary", token) added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add)) added_tok_decoder = {v: k for k, v in added_tok_encoder.items()} self.added_tokens_encoder.update(added_tok_encoder) self.added_tokens_decoder.update(added_tok_decoder) # Make sure we don't split on any special tokens (even they were already in the vocab before e.g. 
for Albert) if special_tokens: self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(new_tokens))) else: # Or on the newly added tokens self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(tokens_to_add))) return len(tokens_to_add) def num_special_tokens_to_add(self, pair: bool = False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. .. note:: This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. Args: pair (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: :obj:`int`: Number of special tokens added to sequences. """ token_ids_0 = [] token_ids_1 = [] return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) def tokenize(self, text: TextInput, **kwargs) -> List[str]: """ Converts a string in a sequence of tokens, using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens. Args: text (:obj:`str`): The sequence to be encoded. **kwargs (additional keyword arguments): Passed along to the model-specific ``prepare_for_tokenization`` preprocessing method. Returns: :obj:`List[str]`: The list of tokens. """ # Simple mapping string => AddedToken for special tokens with specific tokenization behaviors all_special_tokens_extended = dict( (str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken) ) text, kwargs = self.prepare_for_tokenization(text, **kwargs) if kwargs: logger.warning(f"Keyword arguments {kwargs} not recognized.") # TODO: should this be in the base class? if self.init_kwargs.get("do_lower_case", False): # convert non-special tokens to lowercase escaped_special_toks = [re.escape(s_tok) for s_tok in self.all_special_tokens] pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)" text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text) def split_on_token(tok, text): result = [] tok_extended = all_special_tokens_extended.get(tok, None) split_text = text.split(tok) full_word = "" for i, sub_text in enumerate(split_text): # AddedToken can control whitespace stripping around them. # We use them for GPT2 and Roberta to have different behavior depending on the special token # Cf. 
https://github.com/huggingface/transformers/pull/2778 # and https://github.com/huggingface/transformers/issues/3788 if isinstance(tok_extended, AddedToken): if tok_extended.single_word: # Try to avoid splitting on token if ( i < len(split_text) - 1 and not _is_end_of_word(sub_text) and not _is_start_of_word(split_text[i + 1]) ): # Don't extract the special token full_word += sub_text + tok elif full_word: full_word += sub_text result += [full_word] full_word = "" continue # Strip white spaces on the right if tok_extended.rstrip and i > 0: # A bit counter-intuitive but we strip the left of the string # since tok_extended.rstrip means the special token is eating all white spaces on its right sub_text = sub_text.lstrip() # Strip white spaces on the left if tok_extended.lstrip and i < len(split_text) - 1: sub_text = sub_text.rstrip() # Opposite here else: # We strip left and right by default if i < len(split_text) - 1: sub_text = sub_text.rstrip() if i > 0: sub_text = sub_text.lstrip() if i == 0 and not sub_text: result += [tok] elif i == len(split_text) - 1: if sub_text: result += [sub_text] else: if sub_text: result += [sub_text] result += [tok] return result def split_on_tokens(tok_list, text): if not text.strip(): return [] if not tok_list: return self._tokenize(text) tokenized_text = [] text_list = [text] for tok in tok_list: tokenized_text = [] for sub_text in text_list: if sub_text not in self.unique_no_split_tokens: tokenized_text += split_on_token(tok, sub_text) else: tokenized_text += [sub_text] text_list = tokenized_text return list( itertools.chain.from_iterable( ( self._tokenize(token) if token not in self.unique_no_split_tokens else [token] for token in tokenized_text ) ) ) no_split_token = self.unique_no_split_tokens tokenized_text = split_on_tokens(no_split_token, text) return tokenized_text def _tokenize(self, text, **kwargs): """ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Do NOT take care of added tokens. """ raise NotImplementedError def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary. Args: token (:obj:`str` or :obj:`List[str]`): One or several token(s) to convert to token id(s). Returns: :obj:`int` or :obj:`List[int]`: The token id or list of token ids. 
""" if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self.added_tokens_encoder: return self.added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_pretokenized: tokens = list(itertools.chain(*(self.tokenize(t, is_pretokenized=True, **kwargs) for t in text))) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: if is_pretokenized: raise ValueError( f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_pretokenized=True`." ) else: raise ValueError( f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers." "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
"More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) first_ids = get_input_ids(text) second_ids = get_input_ids(text_pair) if text_pair is not None else None return self.prepare_for_model( first_ids, pair_ids=second_ids, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_pretokenized: tokens = list(itertools.chain(*(self.tokenize(t, is_pretokenized=True, **kwargs) for t in text))) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers." "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) input_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if not isinstance(ids_or_pair_ids, (list, tuple)): ids, pair_ids = ids_or_pair_ids, None elif is_pretokenized and not isinstance(ids_or_pair_ids[0], (list, tuple)): ids, pair_ids = ids_or_pair_ids, None else: ids, pair_ids = ids_or_pair_ids first_ids = get_input_ids(ids) second_ids = get_input_ids(pair_ids) if pair_ids is not None else None input_ids.append((first_ids, second_ids)) batch_outputs = self._batch_prepare_for_model( input_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for first_ids, second_ids in batch_ids_pairs: outputs = self.prepare_for_model( first_ids, second_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def prepare_for_tokenization( self, text: str, is_pretokenized: bool = False, **kwargs ) -> Tuple[str, Dict[str, Any]]: """ Performs any necessary transformations before tokenization. This method should pop the arguments from kwargs and return the remaining :obj:`kwargs` as well. 
We test the :obj:`kwargs` at the end of the encoding process to be sure all the arguments have been used. Args: test (:obj:`str`): The text to prepare. is_pretokenized (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the text has been pretokenized. kwargs: Keyword arguments to use for the tokenization. Returns: :obj:`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs. """ return (text, kwargs) def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0 (:obj:`List[int]`): List of ids of the first sequence. token_ids_1 (:obj:`List[int]`, `optional`): List of ids of the second sequence. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Wheter or not the token list is already formated with special tokens for the model. Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)) @overload def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str: ... @overload def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]: ... def convert_ids_to_tokens( self, ids: Union[int, List[int]], skip_special_tokens: bool = False ) -> Union[str, List[str]]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (:obj:`int` or :obj:`List[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. Returns: :obj:`str` or :obj:`List[str]`: The decoded token(s). """ if isinstance(ids, int): if ids in self.added_tokens_decoder: return self.added_tokens_decoder[ids] else: return self._convert_id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue if index in self.added_tokens_decoder: tokens.append(self.added_tokens_decoder[index]) else: tokens.append(self._convert_id_to_token(index)) return tokens def _convert_id_to_token(self, index: int) -> str: raise NotImplementedError def convert_tokens_to_string(self, tokens: List[str]) -> str: """ Converts a sequence of token ids in a single string. The most simple way to do it is ``" ".join(tokens)`` but we often want to remove sub-word tokenization artifacts at the same time. Args: tokens (:obj:`List[str]`): The token to join in a string. Return: The joined tokens. """ return " ".join(tokens) def decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``. Args: token_ids (:obj:`List[int]`): List of tokenized input ids. Can be obtained using the ``__call__`` method. skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. 
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to clean up the tokenization spaces. Returns: :obj:`str`: The decoded sentence. """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separatly for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) text = " ".join(sub_texts) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory) -> Tuple[str]: """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens and special token mappings. .. warning:: Please use :meth:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full tokenizer state if you want to reload it using the :meth:`~transformers.PreTrainedTokenizer.from_pretrained` class method. Args: save_directory (:obj:`str`): The path to adirectory where the tokenizer will be saved. Returns: A tuple of :obj:`str`: The files saved. """ raise NotImplementedError def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = "None", truncation=True, **kwargs, ) -> BatchEncoding: r""" Prepare a batch that can be passed directly to an instance of :class:`~transformers.AutoModelForSeq2SeqLM`. Args: src_texts: (:obj:`List[str]`): List of documents to summarize or source language texts. tgt_texts: (:obj:`List[str]`, `optional`): List of summaries or target language texts. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts). If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). 
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to :obj:`self.__call__`. Returns: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **labels** -- List of token ids for tgt_texts The full set of keys ``[input_ids, attention_mask, labels]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ raise NotImplementedError( "If your model requires more than input_ids for a typical forward pass, you should implement this method. " "Returned keys should be [input_ids, attention_mask, labels]. See MarianTokenizer or T5Tokenizer for a " "reference implementation." )
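# A minimal sketch of a slow tokenizer built on the PreTrainedTokenizer base
# class above. The class name, its toy whitespace vocabulary and the
# "vocab.json" file name are hypothetical; only the overridden hooks
# (vocab_size, get_vocab, _tokenize, _convert_token_to_id,
# _convert_id_to_token, save_vocabulary) follow the documented contract.
# The import assumes this repo's module layout.
import json
import os

from bart.tokenization.tokenization_utils import PreTrainedTokenizer


class ToyWhitespaceTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab, unk_token="[UNK]", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        self.encoder = dict(vocab)                      # token -> id
        self.decoder = {i: t for t, i in self.encoder.items()}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return text.split()                             # naive whitespace split

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory):
        path = os.path.join(save_directory, "vocab.json")
        with open(path, "w", encoding="utf-8") as f:
            json.dump(self.encoder, f, ensure_ascii=False)
        return (path,)

# Expected usage (sketch): ToyWhitespaceTokenizer({"[UNK]": 0, "hello": 1,
# "world": 2}).tokenize("hello world") should yield ['hello', 'world'].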
DeepLearningExamples-master
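# Standalone sketch of the "never split on special/added tokens" strategy that
# tokenize() above applies via unique_no_split_tokens before handing the
# remaining segments to _tokenize(). The function name and the example token
# list are made up for illustration.
def split_keeping_tokens(text, no_split_tokens):
    segments = [text]
    for tok in no_split_tokens:
        next_segments = []
        for seg in segments:
            if seg in no_split_tokens:
                next_segments.append(seg)       # already an intact special token
                continue
            pieces = seg.split(tok)
            for i, piece in enumerate(pieces):
                if piece:
                    next_segments.append(piece)
                if i < len(pieces) - 1:
                    next_segments.append(tok)   # re-insert the token between pieces
        segments = next_segments
    return segments


print(split_keeping_tokens("hello <mask> world", ["<mask>"]))
# -> ['hello ', '<mask>', ' world'] : the special token survives as one piece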
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_utils.py
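# Standalone sketch of the grouping step used by decode() above: added tokens
# are emitted verbatim, and everything between them is joined through
# convert_tokens_to_string. join_subwords below is a hypothetical stand-in for
# that method (here a plain space join).
def join_subwords(tokens):
    return " ".join(tokens)


def group_and_join(filtered_tokens, added_tokens):
    sub_texts, current = [], []
    for token in filtered_tokens:
        if token in added_tokens:
            if current:
                sub_texts.append(join_subwords(current))
                current = []
            sub_texts.append(token)             # keep the added token as-is
        else:
            current.append(token)
    if current:
        sub_texts.append(join_subwords(current))
    return " ".join(sub_texts)


print(group_and_join(["Hello", "world", "<new_tok>", "again"], {"<new_tok>"}))
# -> 'Hello world <new_tok> again'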
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" import json import logging import os from functools import lru_cache import regex as re from tokenizers import ByteLevelBPETokenizer from bart.tokenization.tokenization_utils import AddedToken, PreTrainedTokenizer from bart.tokenization.tokenization_utils_base import BatchEncoding from bart.tokenization.tokenization_utils_fast import PreTrainedTokenizerFast logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json", "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-vocab.json", "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-vocab.json", }, "merges_file": { "gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt", "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-merges.txt", "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "gpt2": 1024, "gpt2-medium": 1024, "gpt2-large": 1024, "gpt2-xl": 1024, "distilgpt2": 1024, } @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class GPT2Tokenizer(PreTrainedTokenizer): """ GPT-2 BPE tokenizer, using byte-level Byte-Pair-Encoding. 
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: :: >>> from transformers import GPT2Tokenizer >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2") >>> tokenizer("Hello world")['input_ids'] [15496, 995] >>> tokenizer(" Hello world")['input_ids'] [18435, 995] You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. .. note:: When used with ``is_pretokenized=True``, this tokenizer will add a space before each word (even the first one). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. merges_file (:obj:`str`): Path to the merges file. errors (:obj:`str`, `optional`, defaults to "replace"): Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information. unk_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The beginning of sequence token. eos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The end of sequence token. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] def __init__( self, vocab_file, merges_file, errors="replace", unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: 
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def _tokenize(self, text): """ Tokenize a string. """ bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text def save_vocabulary(self, save_directory): """ Save the vocabulary and special tokens file to a directory. Args: save_directory (:obj:`str`): The directory in which to save the vocabulary. Returns: :obj:`Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"]) merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES["merges_file"]) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file) ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def prepare_for_tokenization(self, text, is_pretokenized=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if is_pretokenized or add_prefix_space: text = " " + text return (text, kwargs) class GPT2TokenizerFast(PreTrainedTokenizerFast): """ Constructs a "Fast" GPT-2 BPE tokenizer (backed by HuggingFace's `tokenizers` library), using byte-level Byte-Pair-Encoding. 
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: :: >>> from transformers import GPT2TokenizerFast >>> tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") >>> tokenizer("Hello world")['input_ids'] [15496, 995] >>> tokenizer(" Hello world")['input_ids'] [18435, 995] You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. .. note:: When used with ``is_pretokenized=True``, this tokenizer needs to be instantiated with ``add_prefix_space=True``. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. merges_file (:obj:`str`): Path to the merges file. errors (:obj:`str`, `optional`, defaults to "replace"): Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information. unk_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The beginning of sequence token. eos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The end of sequence token. add_prefix_space (:obj:`bool`, `optional`, defaults to `False`): Whether to add a leading space to the first word. This allows to treat the leading word just as any other word. (GPT2 tokenizer detect beginning of words by the preceeding space) trim_offsets (:obj:`bool`, `optional`, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] def __init__( self, vocab_file, merges_file, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs ): super().__init__( ByteLevelBPETokenizer( vocab_file=vocab_file, merges_file=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, ) self.add_prefix_space = add_prefix_space def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_pretokenized = kwargs.get("is_pretokenized", False) assert self.add_prefix_space or not is_pretokenized, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_pretokenized = kwargs.get("is_pretokenized", False) assert self.add_prefix_space or not is_pretokenized, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*args, **kwargs)
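# Self-contained sketch of the greedy merge loop behind GPT2Tokenizer.bpe()
# above: repeatedly merge the adjacent pair with the lowest rank until no
# ranked pair remains. The merge table below is made up; real ranks come from
# merges.txt.
def adjacent_pairs(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


def toy_bpe(token, bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = adjacent_pairs(word)
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)   # apply the merge
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)


ranks = {("l", "o"): 0, ("h", "e"): 1}          # lower rank = higher priority
print(toy_bpe("hello", ranks))                  # -> 'he l lo'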
DeepLearningExamples-master
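# Round-trip sketch of the byte-level mapping built by bytes_to_unicode() in
# the file above: every UTF-8 byte gets a printable unicode stand-in, so the
# BPE vocabulary never needs raw whitespace/control bytes. The import assumes
# this repo's module layout.
from bart.tokenization.tokenization_gpt2 import bytes_to_unicode

byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}

word = " héllo"
mapped = "".join(byte_encoder[b] for b in word.encode("utf-8"))
print(mapped)   # 'ĠhÃ©llo' : the leading space is encoded as 'Ġ'

restored = bytearray(byte_decoder[c] for c in mapped).decode("utf-8")
assert restored == word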
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_gpt2.py
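# Minimal sketch of the prepare_for_tokenization() hook shown in the GPT-2
# tokenizer above: with pretokenized input or add_prefix_space=True a single
# leading space is prepended, so the first word is encoded like any other word.
def gpt2_prepare(text, is_pretokenized=False, add_prefix_space=False):
    if is_pretokenized or add_prefix_space:
        text = " " + text
    return text


print(repr(gpt2_prepare("Hello world")))                         # 'Hello world'
print(repr(gpt2_prepare("Hello world", add_prefix_space=True)))  # ' Hello world'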
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for XLM-RoBERTa model.""" import os from shutil import copyfile from typing import List, Optional from bart.tokenization.tokenization_utils import PreTrainedTokenizer from bart.tokenization.tokenization_xlnet import SPIECE_UNDERLINE from utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "xlm-roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-base-sentencepiece.bpe.model", "xlm-roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-dutch-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-spanish": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-spanish-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll03-english": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-english-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll03-german": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-german-sentencepiece.bpe.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class XLMRobertaTokenizer(PreTrainedTokenizer): """ Adapted from RobertaTokenizer and XLNetTokenizer SentencePiece based tokenizer. Peculiarities: - requires `SentencePiece <https://github.com/google/sentencepiece>`_ This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. bos_token (:obj:`string`, `optional`, defaults to "<s>"): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. eos_token (:obj:`string`, `optional`, defaults to "</s>"): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. 
sep_token (:obj:`string`, `optional`, defaults to "</s>"): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (:obj:`string`, `optional`, defaults to "<s>"): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (:obj:`string`, `optional`, defaults to "<unk>"): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`string`, `optional`, defaults to "<pad>"): The token used for padding, for example when batching sequences of different lengths. mask_token (:obj:`string`, `optional`, defaults to "<mask>"): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs ): super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" "pip install sentencepiece" ) raise self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" "pip install sentencepiece" ) raise self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A XLM-R sequence has the following format: - single sequence: ``<s> X </s>`` - pair of sequences: ``<s> A </s></s> B </s>`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` methods. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-R does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text): return self.sp_model.EncodeAsPieces(text) def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory): """ Save the sentencepiece vocabulary (copy original file) and special tokens file to a directory. Args: save_directory (:obj:`str`): The directory in which to save the vocabulary. Returns: :obj:`Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
DeepLearningExamples-master
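# Shape check for the masks promised by get_special_tokens_mask and
# create_token_type_ids_from_sequences above, written against made-up id lists
# instead of a real tokenizer instance.
ids_a, ids_b = [8, 12], [5]

mask_single = [1] + [0] * len(ids_a) + [1]
mask_pair = [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]
type_ids_pair = [0] * (len(ids_a) + len(ids_b) + 4)   # XLM-R never uses segment ids

print(mask_single)    # [1, 0, 0, 1]
print(mask_pair)      # [1, 0, 0, 1, 1, 0, 1]
print(type_ids_pair)  # [0, 0, 0, 0, 0, 0, 0]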
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_xlm_roberta.py
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for RoBERTa.""" import logging from typing import List, Optional from tokenizers.processors import RobertaProcessing from bart.tokenization.tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast from bart.tokenization.tokenization_utils import AddedToken logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json", "roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", "roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-vocab.json", "distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-vocab.json", "roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json", "roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", }, "merges_file": { "roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt", "roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", "roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-merges.txt", "distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-merges.txt", "roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt", "roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "roberta-base": 512, "roberta-large": 512, "roberta-large-mnli": 512, "distilroberta-base": 512, "roberta-base-openai-detector": 512, "roberta-large-openai-detector": 512, } class RobertaTokenizer(GPT2Tokenizer): """ Constructs a RoBERTa BPE tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: :: >>> from transformers import RobertaTokenizer >>> tokenizer = RobertaTokenizer.from_pretrained("roberta-base") >>> tokenizer("Hello world")['input_ids'] [0, 31414, 232, 328, 2] >>> tokenizer(" Hello world")['input_ids'] [0, 20920, 232, 2] You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. .. 
note:: When used with ``is_pretokenized=True``, this tokenizer will add a space before each word (even the first one). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. merges_file (:obj:`str`): Path to the merges file. errors (:obj:`str`, `optional`, defaults to "replace"): Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information. bos_token (:obj:`string`, `optional`, defaults to "<s>"): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. eos_token (:obj:`string`, `optional`, defaults to "</s>"): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. sep_token (:obj:`string`, `optional`, defaults to "</s>"): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (:obj:`string`, `optional`, defaults to "<s>"): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (:obj:`string`, `optional`, defaults to "<unk>"): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`string`, `optional`, defaults to "<pad>"): The token used for padding, for example when batching sequences of different lengths. mask_token (:obj:`string`, `optional`, defaults to "<mask>"): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file=vocab_file, merges_file=merges_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, ) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format: - single sequence: ``<s> X </s>`` - pair of sequences: ``<s> A </s></s> B </s>`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_pretokenized=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_pretokenized or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) class RobertaTokenizerFast(GPT2TokenizerFast): """ Constructs a "Fast" RoBERTa BPE tokenizer (backed by HuggingFace's `tokenizers` library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: :: >>> from transformers import RobertaTokenizerFast >>> tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base") >>> tokenizer("Hello world")['input_ids'] [0, 31414, 232, 328, 2] >>> tokenizer(" Hello world")['input_ids'] [0, 20920, 232, 2] You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. .. note:: When used with ``is_pretokenized=True``, this tokenizer needs to be instantiated with ``add_prefix_space=True``. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. merges_file (:obj:`str`): Path to the merges file. errors (:obj:`str`, `optional`, defaults to "replace"): Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information. unk_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The beginning of sequence token. eos_token (:obj:`string`, `optional`, defaults to `<|endoftext|>`): The end of sequence token. add_prefix_space (:obj:`bool`, `optional`, defaults to `False`): Whether to add a leading space to the first word. This allows to treat the leading word just as any other word. (GPT2 tokenizer detect beginning of words by the preceeding space) trim_offsets (:obj:`bool`, `optional`, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs ): # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token kwargs.setdefault("pad_token", pad_token) kwargs.setdefault("sep_token", sep_token) kwargs.setdefault("cls_token", cls_token) kwargs.setdefault("mask_token", mask_token) super().__init__( vocab_file=vocab_file, merges_file=merges_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, ) # This will add the necessary special tokens to the vocabulary if needed self.sanitize_special_tokens() self.backend_tokenizer._tokenizer.post_processor = RobertaProcessing( sep=(sep_token, self.sep_token_id), cls=(cls_token, self.cls_token_id), add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
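# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal, hedged example of how the RoBERTa tokenizers defined above format
# special tokens. It assumes the "roberta-base" vocab.json/merges.txt files can
# be fetched via `from_pretrained` (network access or a local cache is
# required); the exact token ids produced depend on that vocabulary.
if __name__ == "__main__":
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    ids_a = tokenizer.encode("Hello world", add_special_tokens=False)
    ids_b = tokenizer.encode("How are you?", add_special_tokens=False)

    # A pair of sequences is wrapped as: <s> A </s></s> B </s>
    pair_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)

    # 1 marks a special token, 0 marks a regular sequence token.
    special_mask = tokenizer.get_special_tokens_mask(ids_a, ids_b)

    # RoBERTa does not use token type ids, so this list is all zeros.
    type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)

    print(pair_ids)
    print(special_mask)
    print(type_ids)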
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_roberta.py
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (host all the user fronting encoding methodes) Special token mixing (host the special tokens logic) and BatchEncoding (wrap the dictionnary of output with special method for the Fast tokenizers) """ import copy import json import logging import os import warnings from collections import OrderedDict, UserDict from enum import Enum from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union import numpy as np from tokenizers import AddedToken from tokenizers import Encoding as EncodingFast from utils.file_utils import ( add_end_docstrings, cached_path, hf_bucket_url, is_remote_url, is_tf_available, is_torch_available, torch_required, ) if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch logger = logging.getLogger(__name__) VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER # Define type aliases and NamedTuples TextInput = str PreTokenizedInput = List[str] EncodedInput = List[int] TextInputPair = Tuple[str, str] PreTokenizedInputPair = Tuple[List[str], List[str]] EncodedInputPair = Tuple[List[int], List[int]] # Slow tokenizers used to be saved in three separated files SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" ADDED_TOKENS_FILE = "added_tokens.json" TOKENIZER_CONFIG_FILE = "tokenizer_config.json" # Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file FULL_TOKENIZER_FILE = "tokenizer.json" class ExplicitEnum(Enum): """ Enum with more explicit error message for missing values. """ @classmethod def _missing_(cls, value): raise ValueError( "%r is not a valid %s, please select one of %s" % (value, cls.__name__, str(list(cls._value2member_map_.keys()))) ) class TruncationStrategy(ExplicitEnum): """ Possible values for the ``truncation`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for tab-completion in an IDE. """ ONLY_FIRST = "only_first" ONLY_SECOND = "only_second" LONGEST_FIRST = "longest_first" DO_NOT_TRUNCATE = "do_not_truncate" class PaddingStrategy(ExplicitEnum): """ Possible values for the ``padding`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for tab-completion in an IDE. """ LONGEST = "longest" MAX_LENGTH = "max_length" DO_NOT_PAD = "do_not_pad" class TensorType(ExplicitEnum): """ Possible values for the ``return_tensors`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for tab-completion in an IDE. """ PYTORCH = "pt" TENSORFLOW = "tf" NUMPY = "np" class CharSpan(NamedTuple): """ Character span in the original string. Args: start (:obj:`int`): Index of the first character in the original string. 
end (:obj:`int`): Index of the character following the last character in the original string. """ start: int end: int class TokenSpan(NamedTuple): """ Token span in an encoded string (list of tokens). Args: start (:obj:`int`): Index of the first token in the span. end (:obj:`int`): Index of the token following the last token in the span. """ start: int end: int class BatchEncoding(UserDict): """ Holds the output of the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.encode_plus` and :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.batch_encode` methods (tokens, attention_masks, etc). This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes utility methods to map from word/character space to token space. Args: data (:obj:`dict`): Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods ('input_ids', 'attention_mask', etc.). encoding (:obj:`tokenizers.Encoding` or :obj:`Sequence[tokenizers.Encoding]`, `optional`): If the tokenizer is a fast tokenizer which outputs additional informations like mapping from word/character space to token space the :obj:`tokenizers.Encoding` instance or list of instance (for batches) hold these informations. tensor_type (:obj:`Union[None, str, TensorType]`, `optional`): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization. prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to add a batch axis when converting to tensors (see :obj:`tensor_type` above). """ def __init__( self, data: Optional[Dict[str, Any]] = None, encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None, tensor_type: Union[None, str, TensorType] = None, prepend_batch_axis: bool = False, ): super().__init__(data) if isinstance(encoding, EncodingFast): encoding = [encoding] self._encodings = encoding self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis) @property def is_fast(self) -> bool: """ :obj:`bool`: Indicate whether this :class:`~transformers.BatchEncoding` was generated from the result of a :class:`~transformers.PreTrainedTokenizerFast` or not. """ return self._encodings is not None def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]: """ If the key is a string, returns the value of the dict associated to :obj:`key` ('input_ids', 'attention_mask', etc.). If the key is an integer, get the :obj:`tokenizers.Encoding` for batch item with index :obj:`key`. """ if isinstance(item, str): return self.data[item] elif self._encodings is not None: return self._encodings[item] else: raise KeyError( "Indexing with integers (to access backend Encoding for a given batch index) " "is not available when using Python based tokenizers" ) def __getattr__(self, item: str): try: return self.data[item] except KeyError: raise AttributeError def __getstate__(self): return {"data": self.data, "encodings": self._encodings} def __setstate__(self, state): if "data" in state: self.data = state["data"] if "encodings" in state: self._encodings = state["encodings"] def keys(self): return self.data.keys() def values(self): return self.data.values() def items(self): return self.data.items() # After this point: # Extended properties and methods only available for fast (Rust-based) tokenizers # provided by HuggingFace tokenizers library. 
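    # Illustrative sketch (hypothetical values, assumes this BatchEncoding was
    # produced by a fast tokenizer so that `_encodings` is populated; the
    # variable `fast_tokenizer` is not defined in this module):
    #
    #     enc = fast_tokenizer("Hello world")
    #     enc.tokens(0)            # subword tokens of the first batch item
    #     enc.words(0)             # word index (or None) for each token
    #     enc.token_to_word(2)     # word index that token 2 belongs to
    #     enc.word_to_tokens(1)    # TokenSpan covering the second word
    #     enc.token_to_chars(2)    # CharSpan of token 2 in the original string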
@property def encodings(self) -> Optional[List[EncodingFast]]: """ :obj:`Optional[List[tokenizers.Encoding]]`: The list all encodings from the tokenization process. Returns :obj:`None` if the input was tokenized through Python (i.e., not a fast) tokenizer. """ return self._encodings def tokens(self, batch_index: int = 0) -> List[str]: """ Return the list of tokens (sub-parts of the input strings after word/subword splitting and before converstion to integer indices) at a given batch index (only works for the output of a fast tokenizer). Args: batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch. Returns: :obj:`List[str]`: The list of tokens at that index. """ if not self._encodings: raise ValueError("tokens() is not available when using Python-based tokenizers") return self._encodings[batch_index].tokens def words(self, batch_index: int = 0) -> List[Optional[int]]: """ Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. Args: batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch. Returns: :obj:`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to :obj:`None` and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word). """ if not self._encodings: raise ValueError("words() is not available when using Python-based tokenizers") return self._encodings[batch_index].words def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: """ Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch. Can be called as: - ``self.token_to_word(token_index)`` if batch size is 1 - ``self.token_to_word(batch_index, token_index)`` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_token_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence. token_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the token in the sequence. Returns: :obj:`int`: Index of the word in the input sequence. """ if not self._encodings: raise ValueError("token_to_word() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index if batch_index < 0: batch_index = self._batch_size + batch_index if token_index < 0: token_index = self._seq_len + token_index return self._encodings[batch_index].token_to_word(token_index) def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan: """ Get the encoded token span corresponding to a word in the sequence of the batch. Token spans are returned as a :class:`~transformers.tokenization_utils_base.TokenSpan` with: - **start** -- Index of the first token. - **end** -- Index of the token following the last token. 
Can be called as: - ``self.word_to_tokens(word_index)`` if batch size is 1 - ``self.word_to_tokens(batch_index, word_index)`` if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_word_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the word in the sequence. word_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the sequence. Returns: :class:`~transformers.tokenization_utils_base.TokenSpan` Span of tokens in the encoded sequence. """ if not self._encodings: raise ValueError("word_to_tokens() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index if batch_index < 0: batch_index = self._batch_size + batch_index if word_index < 0: word_index = self._seq_len + word_index return TokenSpan(*(self._encodings[batch_index].word_to_tokens(word_index))) def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan: """ Get the character span corresponding to an encoded token in a sequence of the batch. Character spans are returned as a :class:`~transformers.tokenization_utils_base.CharSpan` with: - **start** -- Index of the first character in the original string associated to the token. - **end** -- Index of the character following the last character in the original string associated to the token. Can be called as: - ``self.token_to_chars(token_index)`` if batch size is 1 - ``self.token_to_chars(batch_index, token_index)`` if batch size is greater or equal to 1 Args: batch_or_token_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence. token_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the token or tokens in the sequence. Returns: :class:`~transformers.tokenization_utils_base.CharSpan`: Span of characters in the original string. """ if not self._encodings: raise ValueError("token_to_chars() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index))) def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int: """ Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch. Can be called as: - ``self.char_to_token(char_index)`` if batch size is 1 - ``self.char_to_token(batch_index, char_index)`` if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (:obj:`int`): Index of the sequence in the batch. 
If the batch only comprise one sequence, this can be the index of the word in the sequence char_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the sequence. Returns: :obj:`int`: Index of the token. """ if not self._encodings: raise ValueError("char_to_token() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_token(char_index) def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan: """ Get the character span in the original string corresponding to given word in a sequence of the batch. Character spans are returned as a CharSpan NamedTuple with: - start: index of the first character in the original string - end: index of the character following the last character in the original string Can be called as: - ``self.word_to_chars(word_index)`` if batch size is 1 - ``self.word_to_chars(batch_index, word_index)`` if batch size is greater or equal to 1 Args: batch_or_word_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence word_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the sequence. Returns: :obj:`CharSpan` or :obj:`List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan are NamedTuple with: - start: index of the first character associated to the token in the original string - end: index of the character following the last character associated to the token in the original string """ if not self._encodings: raise ValueError("word_to_chars() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index))) def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int: """ Get the word in the original string corresponding to a character in the original string of a sequence of the batch. Can be called as: - ``self.char_to_word(char_index)`` if batch size is 1 - ``self.char_to_word(batch_index, char_index)`` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the character in the orginal string. char_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the character in the orginal string. Returns: :obj:`int` or :obj:`List[int]`: Index or indices of the associated encoded token(s). 
""" if not self._encodings: raise ValueError("char_to_word() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_word(char_index) def convert_to_tensors( self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False ): """ Convert the inner content to tensors. Args: tensor_type (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`): The type of tensors to use. If :obj:`str`, should be one of the values of the enum :class:`~transformers.tokenization_utils_base.TensorType`. If :obj:`None`, no modification is done. prepend_batch_axis (:obj:`int`, `optional`, defaults to :obj:`False`): Whether or not to add the batch dimension during the conversion. """ if tensor_type is None: return self # Convert to TensorType if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW and is_tf_available(): as_tensor = tf.constant elif tensor_type == TensorType.PYTORCH and is_torch_available(): as_tensor = torch.tensor elif tensor_type == TensorType.NUMPY: as_tensor = np.asarray else: raise ImportError( "Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format( tensor_type ) ) # Do the tensor conversion in batch for key, value in self.items(): try: if prepend_batch_axis: value = [value] tensor = as_tensor(value) # Removing this for now in favor of controling the shape with `prepend_batch_axis` # # at-least2d # if tensor.ndim > 2: # tensor = tensor.squeeze(0) # elif tensor.ndim < 2: # tensor = tensor[None, :] self[key] = tensor except: # noqa E722 if key == "overflowing_tokens": raise ValueError( "Unable to create tensor returning overflowing tokens of different lengths. " "Please see if a fast version of this tokenizer is available to have this feature available." ) raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding " "with 'padding=True' 'truncation=True' to have batched tensors with the same length." ) return self @torch_required def to(self, device: str) -> "BatchEncoding": """ Send all values to device by calling :obj:`v.to(device)` (PyTorch only). Args: device (:obj:`str` or :obj:`torch.device`): The device to put the tensors on. Returns: :class:`~transformers.BatchEncoding`: The same instance of :class:`~transformers.BatchEncoding` after modification. """ self.data = {k: v.to(device) for k, v in self.data.items()} return self class SpecialTokensMixin: """ A mixin derived by :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast` to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independant manner and allow to set and update the special tokens. Args: bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing the beginning of a sentence. eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing the end of a sentence. unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing an out-of-vocabulary token. 
sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token separating two different sentences in the same input (used by BERT for instance). pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing the class of the input (used by BERT for instance). mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A tuple or a list of additional special tokens. """ SPECIAL_TOKENS_ATTRIBUTES = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] def __init__(self, verbose=True, **kwargs): self._bos_token = None self._eos_token = None self._unk_token = None self._sep_token = None self._pad_token = None self._cls_token = None self._mask_token = None self._pad_token_type_id = 0 self._additional_special_tokens = [] self.verbose = verbose # We directly set the hidden value to allow initialization with special tokens # which are not yet in the vocabulary. Necesssary for serialization/de-serialization # TODO clean this up at some point (probably by sitching to fast tokenizers) for key, value in kwargs.items(): if key in self.SPECIAL_TOKENS_ATTRIBUTES: if key == "additional_special_tokens": assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple" assert all(isinstance(t, str) for t in value), "One of the tokens is not a string" setattr(self, key, value) elif isinstance(value, (str, AddedToken)): setattr(self, key, value) else: raise TypeError( "special token {} has to be either str or AddedToken but got: {}".format(key, type(value)) ) def sanitize_special_tokens(self) -> int: """ Make sure that all the special tokens attributes of the tokenizer (:obj:`tokenizer.mask_token`, :obj:`tokenizer.cls_token`, etc.) are in the vocabulary. Add the missing ones to the vocabulary if needed. Return: :obj:`int`: The number of tokens added in the vocaulary during the operation. """ return self.add_tokens(self.all_special_tokens_extended, special_tokens=True) def add_special_tokens(self, special_tokens_dict: Dict[str, Union[str, AddedToken]]) -> int: """ Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). Using : obj:`add_special_tokens` will ensure your special tokens can be used in several ways: - Special tokens are carefully handled by the tokenizer (they are never split). - You can easily refer to special tokens using tokenizer class attributes like :obj:`tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (for instance :class:`~transformers.BertTokenizer` :obj:`cls_token` is already registered to be :obj`'[CLS]'` and XLM's one is also registered to be :obj:`'</s>'`). 
Args: special_tokens_dict (dictionary `str` to `str` or :obj:`tokenizers.AddedToken`): Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``]. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). Returns: :obj:`int`: Number of tokens added to the vocabulary. Examples:: # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') special_tokens_dict = {'cls_token': '<CLS>'} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print('We have added', num_added_toks, 'tokens') # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == '<CLS>' """ if not special_tokens_dict: return 0 added_tokens = 0 for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token" if self.verbose: logger.info("Assigning %s to the %s key of the tokenizer", value, key) setattr(self, key, value) if key == "additional_special_tokens": assert isinstance(value, (list, tuple)) and all( isinstance(t, (str, AddedToken)) for t in value ), f"Tokens {value} for key {key} should all be str or AddedToken instances" added_tokens += self.add_tokens(value, special_tokens=True) else: assert isinstance( value, (str, AddedToken) ), f"Token {value} for key {key} should be a str or an AddedToken instance" added_tokens += self.add_tokens([value], special_tokens=True) return added_tokens def add_tokens( self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False ) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. Args: new_tokens (:obj:`str`, :obj:`tokenizers.AddedToken` or a list of `str` or :obj:`tokenizers.AddedToken`): Tokens are only added if they are not already in the vocabulary. :obj:`tokenizers.AddedToken` wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc. special_token (:obj:`bool`, `optional`, defaults to :obj:`False`): Can be used to specify if the token is a special token. This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance). See details for :obj:`tokenizers.AddedToken` in HuggingFace tokenizers library. Returns: :obj:`int`: Number of tokens added to the vocabulary. Examples:: # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2']) print('We have added', num_added_toks, 'tokens') # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer)) """ if not new_tokens: return 0 if not isinstance(new_tokens, (list, tuple)): new_tokens = [new_tokens] return self._add_tokens(new_tokens, special_tokens=special_tokens) @property def bos_token(self) -> str: """ :obj:`str`: Beginning of sentence token. Log an error if used while not having been set. """ if self._bos_token is None and self.verbose: logger.error("Using bos_token, but it is not set yet.") return None return str(self._bos_token) @property def eos_token(self) -> str: """ :obj:`str`: End of sentence token. Log an error if used while not having been set. """ if self._eos_token is None and self.verbose: logger.error("Using eos_token, but it is not set yet.") return None return str(self._eos_token) @property def unk_token(self) -> str: """ :obj:`str`: Unknown token. Log an error if used while not having been set. """ if self._unk_token is None and self.verbose: logger.error("Using unk_token, but it is not set yet.") return None return str(self._unk_token) @property def sep_token(self) -> str: """ :obj:`str`: Separation token, to separate context and query in an input sequence. Log an error if used while not having been set. """ if self._sep_token is None and self.verbose: logger.error("Using sep_token, but it is not set yet.") return None return str(self._sep_token) @property def pad_token(self) -> str: """ :obj:`str`: Padding token. Log an error if used while not having been set. """ if self._pad_token is None and self.verbose: logger.error("Using pad_token, but it is not set yet.") return None return str(self._pad_token) @property def cls_token(self) -> str: """ :obj:`str`: Classification token, to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """ if self._cls_token is None and self.verbose: logger.error("Using cls_token, but it is not set yet.") return None return str(self._cls_token) @property def mask_token(self) -> str: """ :obj:`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. """ if self._mask_token is None and self.verbose: logger.error("Using mask_token, but it is not set yet.") return None return str(self._mask_token) @property def additional_special_tokens(self) -> List[str]: """ :obj:`List[str]`: All the additional special tokens you may want to use. Log an error if used while not having been set. """ if self._additional_special_tokens is None and self.verbose: logger.error("Using additional_special_tokens, but it is not set yet.") return None return [str(tok) for tok in self._additional_special_tokens] @bos_token.setter def bos_token(self, value): self._bos_token = value @eos_token.setter def eos_token(self, value): self._eos_token = value @unk_token.setter def unk_token(self, value): self._unk_token = value @sep_token.setter def sep_token(self, value): self._sep_token = value @pad_token.setter def pad_token(self, value): self._pad_token = value @cls_token.setter def cls_token(self, value): self._cls_token = value @mask_token.setter def mask_token(self, value): self._mask_token = value @additional_special_tokens.setter def additional_special_tokens(self, value): self._additional_special_tokens = value @property def bos_token_id(self) -> Optional[int]: """ :obj:`Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns :obj:`None` if the token has not been set. 
""" if self._bos_token is None: return None return self.convert_tokens_to_ids(self.bos_token) @property def eos_token_id(self) -> Optional[int]: """ :obj:`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns :obj:`None` if the token has not been set. """ if self._eos_token is None: return None return self.convert_tokens_to_ids(self.eos_token) @property def unk_token_id(self) -> Optional[int]: """ :obj:`Optional[int]`: Id of the unknown token in the vocabulary. Returns :obj:`None` if the token has not been set. """ if self._unk_token is None: return None return self.convert_tokens_to_ids(self.unk_token) @property def sep_token_id(self) -> Optional[int]: """ :obj:`Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input sequence. Returns :obj:`None` if the token has not been set. """ if self._sep_token is None: return None return self.convert_tokens_to_ids(self.sep_token) @property def pad_token_id(self) -> Optional[int]: """ :obj:`Optional[int]`: Id of the padding token in the vocabulary. Returns :obj:`None` if the token has not been set. """ if self._pad_token is None: return None return self.convert_tokens_to_ids(self.pad_token) @property def pad_token_type_id(self) -> int: """ :obj:`int`: Id of the padding token type in the vocabulary. """ return self._pad_token_type_id @property def cls_token_id(self) -> Optional[int]: """ :obj:`Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Returns :obj:`None` if the token has not been set. """ if self._cls_token is None: return None return self.convert_tokens_to_ids(self.cls_token) @property def mask_token_id(self) -> Optional[int]: """ :obj:`Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language modeling. Returns :obj:`None` if the token has not been set. """ if self._mask_token is None: return None return self.convert_tokens_to_ids(self.mask_token) @property def additional_special_tokens_ids(self) -> List[int]: """ :obj:`List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.additional_special_tokens) @property def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]: """ :obj:`Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (:obj:`cls_token`, :obj:`unk_token`, etc.) to their values (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.). Convert potential tokens of :obj:`tokenizers.AddedToken` type to string. """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, "_" + attr) if attr_value: set_attr[attr] = str(attr_value) return set_attr @property def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]: """ :obj:`Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping special token class attributes (:obj:`cls_token`, :obj:`unk_token`, etc.) to their values (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.). Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. 
""" set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, "_" + attr) if attr_value: set_attr[attr] = attr_value return set_attr @property def all_special_tokens(self) -> List[str]: """ :obj:`List[str]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes. Convert tokens of :obj:`tokenizers.AddedToken` type to string. """ all_toks = [str(s) for s in self.all_special_tokens_extended] return all_toks @property def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]: """ :obj:`List[Union[str, tokenizers.AddedToken]]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes. Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ all_toks = [] set_attr = self.special_tokens_map_extended for attr_value in set_attr.values(): all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]) all_toks = list(OrderedDict.fromkeys(all_toks)) return all_toks @property def all_special_ids(self) -> List[int]: """ :obj:`List[int]`: List the ids of the special tokens(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes. """ all_toks = self.all_special_tokens all_ids = self.convert_tokens_to_ids(all_toks) return all_ids ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). 
max_length (:obj:`int`, `optional`): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (:obj:`int`, `optional`, defaults to 0): If set to a number along with :obj:`max_length`, the overflowing tokens returned when :obj:`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. is_pretokenized (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the input is already pre-tokenized (e.g., split into words), in which case the tokenizer will skip the pre-tokenization step. This is useful for NER or token classification. pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. """ ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (:obj:`bool`, `optional`): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are token type IDs? <../glossary.html#token-type-ids>`__ return_attention_mask (:obj:`bool`, `optional`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are attention masks? <../glossary.html#attention-mask>`__ return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return overflowing token sequences. return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`): Wheter or not to return special tokens mask information. return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return :obj:`(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from :class:`~transformers.PreTrainedTokenizerFast`, if using Python's tokenizer, this method will raise :obj:`NotImplementedError`. return_length (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return the lengths of the encoded inputs. verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to print informations and warnings. **kwargs: passed to the :obj:`self.tokenize()` method Return: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to a model. `What are input IDs? 
<../glossary.html#input-ids>`__ - **token_type_ids** -- List of token type ids to be fed to a model (when :obj:`return_token_type_ids=True` or if `"token_type_ids"` is in :obj:`self.model_input_names`). `What are token type IDs? <../glossary.html#token-type-ids>`__ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when :obj:`return_attention_mask=True` or if `"attention_mask"` is in :obj:`self.model_input_names`). `What are attention masks? <../glossary.html#attention-mask>`__ - **overflowing_tokens** -- List of overflowing tokens sequences (when a :obj:`max_length` is specified and :obj:`return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a :obj:`max_length` is specified and :obj:`return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 0 specifying added special tokens and 1 specifying regual sequence tokens (when :obj:`add_special_tokens=True` and :obj:`return_special_tokens_mask=True`). - **length** -- The length of the inputs (when :obj:`return_length=True`) """ INIT_TOKENIZER_DOCSTRING = r""" Class attributes (overridden by derived classes) - **vocab_files_names** (:obj:`Dict[str, str]`) -- A ditionary with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string). - **pretrained_vocab_files_map** (:obj:`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the :obj:`short-cut-names` of the pretrained models with, as associated values, the :obj:`url` to the associated pretrained vocabulary file. - **max_model_input_sizes** (:obj:`Dict[str, Optinal[int]]`) -- A dictionary with, as keys, the :obj:`short-cut-names` of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or :obj:`None` if the model has no maximum input size. - **pretrained_init_configuration** (:obj:`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the :obj:`short-cut-names` of the pretrained models, and as associated values, a dictionnary of specific arguments to pass to the ``__init__`` method of the tokenizer class for this pretrained model when loading the tokenizer with the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained` method. - **model_input_names** (:obj:`List[str]`) -- A list of inputs expected in the forward pass of the model. - **padding_side** (:obj:`str`) -- The default value for the side on which the model should have padding applied. Should be :obj:`'right'` or :obj:`'left'`. Args: model_max_length (:obj:`int`, `optional`): The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`, this will be set to the value stored for the associated model in ``max_model_input_sizes`` (see above). If no value is provided, will default to VERY_LARGE_INTEGER (:obj:`int(1e30)`). padding_side: (:obj:`str`, `optional`): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
model_input_names (:obj:`List[string]`, `optional`): The list of inputs accepted by the forward pass of the model (like :obj:`"token_type_ids"` or :obj:`"attention_mask"`). Default value is picked from the class attribute of the same name. bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing the beginning of a sentence. Will be associated to ``self.bos_token`` and ``self.bos_token_id``. eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing the end of a sentence. Will be associated to ``self.eos_token`` and ``self.eos_token_id``. unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing an out-of-vocabulary token. Will be associated to ``self.unk_token`` and ``self.unk_token_id``. sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to ``self.sep_token`` and ``self.sep_token_id``. pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to ``self.pad_token`` and ``self.pad_token_id``. cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing the class of the input (used by BERT for instance). Will be associated to ``self.cls_token`` and ``self.cls_token_id``. mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to ``self.mask_token`` and ``self.mask_token_id``. additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`): A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``. """ PREPARE_SEQ2SEQ_BATCH_DOCSTRING = """ Arguments: src_texts: (:obj:`list`): list of documents to summarize or source language texts tgt_texts: (:obj:`list`, `optional`): list of tgt language texts or summaries. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). Return: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **decoder_input_ids** -- List of token ids to be fed to the decoder. - **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder. This does not include causal mask, which is built by the model. The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ @add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class PreTrainedTokenizerBase(SpecialTokensMixin): """ Base class for :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast`. Handles shared (mostly boiler plate) methods for those two classes. 
""" vocab_files_names: Dict[str, str] = {} pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {} pretrained_init_configuration: Dict[str, Dict[str, Any]] = {} max_model_input_sizes: Dict[str, Optional[int]] = {} model_input_names: List[str] = ["token_type_ids", "attention_mask"] padding_side: str = "right" def __init__(self, **kwargs): # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``) self.init_inputs = () self.init_kwargs = kwargs # For backward compatibility we fallback to set model_max_length from max_len if provided model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None)) self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER # Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed. self.padding_side = kwargs.pop("padding_side", self.padding_side) assert self.padding_side in [ "right", "left", ], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}" self.model_input_names = kwargs.pop("model_input_names", self.model_input_names) super().__init__(**kwargs) @property def max_len(self) -> int: """ :obj:`int`: **Deprecated** Kept here for backward compatibility. Now renamed to :obj:`model_max_length` to avoid ambiguity. """ warnings.warn( "The `max_len` attribute has been deprecated and will be removed in a future version, use `model_max_length` instead.", FutureWarning, ) return self.model_max_length @property def max_len_single_sentence(self) -> int: """ :obj:`int`: The maximum length of a sentence that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=False) @property def max_len_sentences_pair(self) -> int: """ :obj:`int`: The maximum combined length of a pair of sentences that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=True) @max_len_single_sentence.setter def max_len_single_sentence(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_single_sentence'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose: logger.warning( "Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up." ) else: raise ValueError( "Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up." ) @max_len_sentences_pair.setter def max_len_sentences_pair(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_sentences_pair'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose: logger.warning( "Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up." ) else: raise ValueError( "Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up." ) @classmethod def from_pretrained(cls, *inputs, **kwargs): r""" Instantiate a :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase` (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path (:obj:`str`): Can be either: - A string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g., ``bert-base-uncased``. - A string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g., ``dbmdz/bert-base-german-cased``. 
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained` method, e.g., ``./my_model_directory/``. - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., ``./my_model_directory/vocab.txt``. cache_dir (:obj:`str`, `optional`): Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download the vocabulary files and override the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Attempt to resume the download if such a file exists. proxies (:obj:`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. inputs (additional positional arguments, `optional`): Will be passed along to the Tokenizer ``__init__`` method. kwargs (additional keyword arguments, `optional`): Will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__`` for more details. Examples:: # We can't instantiate directly the base class `PreTrainedTokenizerBase` so let's show our examples on a derived class: BertTokenizer # Download vocabulary from S3 and cache. tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from S3 (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased') # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) tokenizer = BertTokenizer.from_pretrained('./test/saved_model/') # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt') # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>') # You should be sure '<unk>' is in the vocabulary when doing that. 
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == '<unk>' """ return cls._from_pretrained(*inputs, **kwargs) @classmethod def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs): cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) s3_models = list(cls.max_model_input_sizes.keys()) vocab_files = {} init_configuration = {} if pretrained_model_name_or_path in s3_models: # Get the vocabulary from AWS S3 bucket for file_id, map_list in cls.pretrained_vocab_files_map.items(): vocab_files[file_id] = map_list[pretrained_model_name_or_path] if ( cls.pretrained_init_configuration and pretrained_model_name_or_path in cls.pretrained_init_configuration ): init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy() else: # Get the vocabulary from local files logger.info( "Model name '{}' not found in model shortcut name list ({}). " "Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format( pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path ) ) if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): if len(cls.vocab_files_names) > 1: raise ValueError( "Calling {}.from_pretrained() with the path to a single file or url is not supported." "Use a model identifier or the path to a directory instead.".format(cls.__name__) ) logger.warning( "Calling {}.from_pretrained() with the path to a single file or url is deprecated".format( cls.__name__ ) ) file_id = list(cls.vocab_files_names.keys())[0] vocab_files[file_id] = pretrained_model_name_or_path else: # At this point pretrained_model_name_or_path is either a directory or a model identifier name additional_files_names = { "added_tokens_file": ADDED_TOKENS_FILE, "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, "tokenizer_config_file": TOKENIZER_CONFIG_FILE, "full_tokenizer_file": FULL_TOKENIZER_FILE, } # Look for the tokenizer files for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items(): if os.path.isdir(pretrained_model_name_or_path): full_file_name = os.path.join(pretrained_model_name_or_path, file_name) if not os.path.exists(full_file_name): logger.info("Didn't find file {}. We won't load it.".format(full_file_name)) full_file_name = None else: full_file_name = hf_bucket_url( pretrained_model_name_or_path, filename=file_name, use_cdn=False, mirror=None ) vocab_files[file_id] = full_file_name # Get files from url, cache, or disk depending on the case try: resolved_vocab_files = {} for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None else: resolved_vocab_files[file_id] = cached_path( file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) except EnvironmentError: if pretrained_model_name_or_path in s3_models: msg = "Couldn't reach server at '{}' to download vocabulary files." else: msg = ( "Model name '{}' was not found in tokenizers model name list ({}). 
" "We assumed '{}' was a path or url to a directory containing vocabulary files " "named {}, but couldn't find such vocabulary files at this path or url.".format( pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values()), ) ) raise EnvironmentError(msg) if all(full_file_name is None for full_file_name in resolved_vocab_files.values()): raise EnvironmentError( "Model name '{}' was not found in tokenizers model name list ({}). " "We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files " "named {} but couldn't find such vocabulary files at this path or url.".format( pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values()), ) ) for file_id, file_path in vocab_files.items(): if file_path == resolved_vocab_files[file_id]: logger.info("loading file {}".format(file_path)) else: logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id])) # Prepare tokenizer initialization kwargs # Did we saved some inputs and kwargs to reload ? tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None) if tokenizer_config_file is not None: with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle: init_kwargs = json.load(tokenizer_config_handle) saved_init_inputs = init_kwargs.pop("init_inputs", ()) if not init_inputs: init_inputs = saved_init_inputs else: init_kwargs = init_configuration # Update with newly provided kwargs init_kwargs.update(kwargs) # Set max length if needed if pretrained_model_name_or_path in cls.max_model_input_sizes: # if we're using a pretrained model, ensure the tokenizer # wont index sequences longer than the number of positional embeddings model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path] if model_max_length is not None and isinstance(model_max_length, (int, float)): init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length) # Merge resolved_vocab_files arguments in init_kwargs. added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None) for args_name, file_path in resolved_vocab_files.items(): if args_name not in init_kwargs: init_kwargs[args_name] = file_path # Instantiate tokenizer. try: tokenizer = cls(*init_inputs, **init_kwargs) except OSError: raise OSError( "Unable to load vocabulary from file. " "Please check that the provided vocabulary is accessible and not corrupted." ) # Save inputs and kwargs for saving and re-loading with ``save_pretrained`` tokenizer.init_inputs = init_inputs tokenizer.init_kwargs = init_kwargs # If there is a complementary special token map, load it special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None) if special_tokens_map_file is not None: with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle: special_tokens_map = json.load(special_tokens_map_handle) for key, value in special_tokens_map.items(): if isinstance(value, dict): value = AddedToken(**value) elif isinstance(value, list): value = [AddedToken(**token) if isinstance(token, dict) else token for token in value] setattr(tokenizer, key, value) # Add supplementary tokens. 
special_tokens = tokenizer.all_special_tokens if added_tokens_file is not None: with open(added_tokens_file, encoding="utf-8") as added_tokens_handle: added_tok_encoder = json.load(added_tokens_handle) # Sort added tokens by index added_tok_encoder_sorted = list(sorted(added_tok_encoder.items(), key=lambda x: x[1])) for token, index in added_tok_encoder_sorted: assert index == len(tokenizer), ( f"Non-consecutive added token '{token}' found. " f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary." ) tokenizer.add_tokens(token, special_tokens=bool(token in special_tokens)) # Check all our special tokens are registrered as "no split" token (we don't cut them) and are in the vocab added_tokens = tokenizer.sanitize_special_tokens() if added_tokens: logger.warning( "Special tokens have been added in the vocabulary, make sure the associated word emebedding are fine-tuned or trained." ) return tokenizer def save_pretrained(self, save_directory: str) -> Tuple[str]: """ Save the tokenizer vocabulary files together with: - added tokens, - special tokens to class attributes mapping, - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert). This method make sure the full tokenizer can then be re-loaded using the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained` class method. .. Warning:: This won't save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying :obj:`tokenizer.do_lower_case` after creation). Args: save_directory (:obj:`str`): The path to adirectory where the tokenizer will be saved. Returns: A tuple of :obj:`str`: The files saved. """ if os.path.isfile(save_directory): logger.error("Provided path ({}) should be a directory, not a file".format(save_directory)) return os.makedirs(save_directory, exist_ok=True) special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE) added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE) tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE) tokenizer_config = copy.deepcopy(self.init_kwargs) if len(self.init_inputs) > 0: tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs) for file_id in self.vocab_files_names.keys(): tokenizer_config.pop(file_id, None) with open(tokenizer_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tokenizer_config, ensure_ascii=False)) with open(special_tokens_map_file, "w", encoding="utf-8") as f: write_dict = {} for key, value in self.special_tokens_map_extended.items(): if isinstance(value, AddedToken): write_dict[key] = value.__getstate__() elif isinstance(value, list): write_dict[key] = [ token.__getstate__() if isinstance(token, AddedToken) else token for token in value ] else: write_dict[key] = value f.write(json.dumps(write_dict, ensure_ascii=False)) added_vocab = self.get_added_vocab() if added_vocab: with open(added_tokens_file, "w", encoding="utf-8") as f: out_str = json.dumps(added_vocab, ensure_ascii=False) f.write(out_str) vocab_files = self.save_vocabulary(save_directory) return vocab_files + (special_tokens_map_file, added_tokens_file) @add_end_docstrings( ENCODE_KWARGS_DOCSTRING, """ **kwargs: Passed along to the `.tokenize()` method. """, """ Returns: :obj:`List[int]`, :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`: The tokenized ids of the text. 
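
        Examples::

            # A minimal, illustrative sketch; the tokenizer class and checkpoint are placeholders.
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            ids = tokenizer.encode("Hello world", add_special_tokens=True)
            # ``ids`` is a list of ints; pass ``return_tensors='pt'`` to get a ``torch.Tensor`` instead.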
""", ) def encode( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs ) -> List[int]: """ Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``. Args: text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the ``tokenize`` method) or a list of integers (tokenized string ids using the ``convert_tokens_to_ids`` method). text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the ``tokenize`` method) or a list of integers (tokenized string ids using the ``convert_tokens_to_ids`` method). """ encoded_inputs = self.encode_plus( text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, return_tensors=return_tensors, **kwargs, ) return encoded_inputs["input_ids"] def num_special_tokens_to_add(self, pair: bool = False) -> int: raise NotImplementedError def _get_padding_truncation_strategies( self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs ): """ Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy and pad_to_max_length) and behaviors. """ old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate") old_pad_to_max_length = kwargs.pop("pad_to_max_length", False) # Backward compatibility for previous behavior, maybe we should deprecate it: # If you only set max_length, it activates truncation for max_length if max_length is not None and padding is False and truncation is False: if verbose: logger.warning( "Truncation was not explicitely activated but `max_length` is provided a specific value, " "please use `truncation=True` to explicitely truncate examples to max length. " "Defaulting to 'longest_first' truncation strategy. " "If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy " "more precisely by providing a specific strategy to `truncation`." ) truncation = "longest_first" # Get padding strategy if padding is False and old_pad_to_max_length: if verbose: warnings.warn( "The `pad_to_max_length` argument is deprecated and will be removed in a future version, " "use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or " "use `padding='max_length'` to pad to a max length. In this case, you can give a specific " "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the " "maximal input size of the model (e.g. 
512 for Bert).", FutureWarning, ) if max_length is None: padding_strategy = PaddingStrategy.LONGEST else: padding_strategy = PaddingStrategy.MAX_LENGTH elif padding is not False: if padding is True: padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(padding, PaddingStrategy): padding_strategy = PaddingStrategy(padding) else: padding_strategy = PaddingStrategy.DO_NOT_PAD # Get truncation strategy if truncation is False and old_truncation_strategy != "do_not_truncate": if verbose: warnings.warn( "The `truncation_strategy` argument is deprecated and will be removed in a future version, " "use `truncation=True` to truncate examples to a max length. You can give a specific " "length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the " "maximal input size of the model (e.g. 512 for Bert). " " If you have pairs of inputs, you can give a specific truncation strategy selected among " "`truncation='only_first'` (will only truncate the first sentence in the pairs) " "`truncation='only_second'` (will only truncate the second sentence in the pairs) " "or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).", FutureWarning, ) truncation_strategy = TruncationStrategy(old_truncation_strategy) elif truncation is not False: if truncation is True: truncation_strategy = ( TruncationStrategy.LONGEST_FIRST ) # Default to truncate the longest sequences in pairs of inputs elif not isinstance(truncation, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation) else: truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: if self.model_max_length > LARGE_INTEGER: if verbose: logger.warning( "Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. " "Default to no padding." ) padding_strategy = PaddingStrategy.DO_NOT_PAD else: max_length = self.model_max_length if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE: if self.model_max_length > LARGE_INTEGER: if verbose: logger.warning( "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. " "Default to no truncation." ) truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE else: max_length = self.model_max_length # Test if we have a padding token if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0): raise ValueError( "Asking to pad but the tokenizer does not have a padding token. " "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` " "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`." ) # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided if ( truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and padding_strategy != PaddingStrategy.DO_NOT_PAD and pad_to_multiple_of is not None and max_length is not None and (max_length % pad_to_multiple_of != 0) ): raise ValueError( f"Truncation and padding are both activated but " f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})." 
) return padding_strategy, truncation_strategy, max_length, kwargs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set :obj:`is_pretokenized=True` (to lift the ambiguity with a batch of sequences). text_pair (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set :obj:`is_pretokenized=True` (to lift the ambiguity with a batch of sequences). """ # Input type checking for clearer error assert isinstance(text, str) or ( isinstance(text, (list, tuple)) and ( len(text) == 0 or ( isinstance(text[0], str) or (isinstance(text[0], (list, tuple)) and (len(text[0]) == 0 or isinstance(text[0][0], str))) ) ) ), ( "text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) " "or `List[List[str]]` (batch of pretokenized examples)." ) assert ( text_pair is None or isinstance(text_pair, str) or ( isinstance(text_pair, (list, tuple)) and ( len(text_pair) == 0 or ( isinstance(text_pair[0], str) or ( isinstance(text_pair[0], (list, tuple)) and (len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str)) ) ) ) ) ), ( "text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) " "or `List[List[str]]` (batch of pretokenized examples)." 
) is_batched = bool( (not is_pretokenized and isinstance(text, (list, tuple))) or (is_pretokenized and isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))) ) if is_batched: batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_pretokenized=is_pretokenized, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_pretokenized=is_pretokenized, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, ``__call__`` should be used instead. Args: text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the latter only for not-fast tokenizers)): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the ``tokenize`` method) or a list of integers (tokenized string ids using the ``convert_tokens_to_ids`` method). text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the ``tokenize`` method) or a list of integers (tokenized string ids using the ``convert_tokens_to_ids`` method). 
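
        Examples::

            # Illustrative sketch, assuming ``tokenizer`` is an instance of a concrete subclass;
            # ``__call__`` accepts the same arguments and is the preferred entry point.
            enc = tokenizer.encode_plus(
                "What is a tokenizer?", "A tokenizer splits text into tokens.",
                truncation=True, max_length=32,
            )
            # ``enc`` is a BatchEncoding containing ``input_ids`` plus, by default,
            # ``token_type_ids`` and ``attention_mask`` (controlled by ``self.model_input_names``).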
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_pretokenized=is_pretokenized, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: raise NotImplementedError @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: """ Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. .. warning:: This method is deprecated, ``__call__`` should be used instead. Args: batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`, :obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also :obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in ``encode_plus``). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_pretokenized=is_pretokenized, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: raise NotImplementedError def pad( self, encoded_inputs: Union[ BatchEncoding, List[BatchEncoding], Dict[str, EncodedInput], Dict[str, List[EncodedInput]], List[Dict[str, EncodedInput]], ], padding: Union[bool, str, PaddingStrategy] = True, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, verbose: bool = True, ) -> BatchEncoding: """ Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch. Padding side (left/right) padding token ids are defined at the tokenizer level (with ``self.padding_side``, ``self.pad_token_id`` and ``self.pad_token_type_id``) .. note:: If the ``encoded_inputs`` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with ``return_tensors``. In the case of PyTorch tensors, you will lose the specific device of your tensors however. Args: encoded_inputs (:class:`~transformers.BatchEncoding`, list of :class:`~transformers.BatchEncoding`, :obj:`Dict[str, List[int]]`, :obj:`Dict[str, List[List[int]]` or :obj:`List[Dict[str, List[int]]]`): Tokenized inputs. Can represent one input (:class:`~transformers.BatchEncoding` or :obj:`Dict[str, List[int]]`) or a batch of tokenized inputs (list of :class:`~transformers.BatchEncoding`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function. 
Instead of :obj:`List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask (:obj:`bool`, `optional`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are attention masks? <../glossary.html#attention-mask>`__ return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to print informations and warnings. """ # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)): encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()} assert "input_ids" in encoded_inputs, ( "You should supply an encoding or a list of encodings to this method. " "An encoding is the output of one the encoding methods of the tokenizer, i.e. " "__call__/encode_plus/batch_encode_plus. " ) if not encoded_inputs["input_ids"]: if return_attention_mask: encoded_inputs["attention_mask"] = [] return encoded_inputs # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch first_element = encoded_inputs["input_ids"][0] if isinstance(first_element, (list, tuple)) and first_element: first_element = first_element[0] if not isinstance(first_element, int): if is_tf_available() and isinstance(first_element, tf.Tensor): return_tensors = "tf" if return_tensors is None else return_tensors elif is_torch_available() and isinstance(first_element, torch.Tensor): return_tensors = "pt" if return_tensors is None else return_tensors elif isinstance(first_element, np.ndarray): return_tensors = "np" if return_tensors is None else return_tensors else: raise ValueError( f"type of {first_element} unknown: {type(first_element)}. 
" f"Should be one of a python, numpy, pytorch or tensorflow object." ) def to_py_obj(obj): if isinstance(obj, (list, tuple)): return [to_py_obj(o) for o in obj] elif is_tf_available() and isinstance(obj, tf.Tensor): return obj.numpy().tolist() elif is_torch_available() and isinstance(obj, torch.Tensor): return obj.cpu().tolist() elif isinstance(obj, np.ndarray): return obj.tolist() else: return obj for key, value in encoded_inputs.items(): encoded_inputs[key] = to_py_obj(value) # Convert padding_strategy in PaddingStrategy padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( padding=padding, max_length=max_length, verbose=verbose ) if encoded_inputs["input_ids"] and not isinstance(encoded_inputs["input_ids"][0], (list, tuple)): encoded_inputs = self._pad( encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) return BatchEncoding(encoded_inputs, tensor_type=return_tensors) batch_size = len(encoded_inputs["input_ids"]) assert all( len(v) == batch_size for v in encoded_inputs.values() ), "Some items in the output dictionnary have a different batch size than others." if padding_strategy == PaddingStrategy.LONGEST: max_length = max(len(inputs) for inputs in encoded_inputs["input_ids"]) padding_strategy = PaddingStrategy.MAX_LENGTH batch_outputs = {} for i in range(batch_size): inputs = dict((k, v[i]) for k, v in encoded_inputs.items()) outputs = self._pad( inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) return BatchEncoding(batch_outputs, tensor_type=return_tensors) def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create the token type IDs corresponding to the sequences passed. `What are token type IDs? <../glossary.html#token-type-ids>`__ Should be overriden in a subclass if the model has a special way of building those. Args: token_ids_0 (:obj:`List[int]`): The first tokenized sequence. token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence. Returns: :obj:`List[int]`: The token type ids. """ if token_ids_1 is None: return len(token_ids_0) * [0] return [0] * len(token_ids_0) + [1] * len(token_ids_1) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. This implementation does not add special tokens and this method should be overriden in a subclass. Args: token_ids_0 (:obj:`List[int]`): The first tokenized sequence. token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence. Returns: :obj:`List[int]`: The model input with special tokens. 
""" if token_ids_1 is None: return token_ids_0 return token_ids_0 + token_ids_1 @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, ids: List[int], pair_ids: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: ids (:obj:`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. pair_ids (:obj:`List[int]`, `optional`): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. """ if "return_lengths" in kwargs: if verbose: warnings.warn( "The PreTrainedTokenizerBase.prepare_for_model `return_lengths` parameter is deprecated. " "Please use `return_length` instead.", FutureWarning, ) return_length = kwargs["return_lengths"] # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} # Compute the total size of the returned encodings total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ids, pair_ids, overflowing_tokens = self.truncate_sequences( ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else []) # Build output dictionnary encoded_inputs["input_ids"] = sequence if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if 
add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) # Check lengths if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose: logger.warning( "Token indices sequence length is longer than the specified maximum sequence length " "for this model ({} > {}). Running this sequence through the model will result in " "indexing errors".format(len(ids), self.model_max_length) ) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], pair_ids: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (:obj:`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. pair_ids (:obj:`List[int]`, `optional`): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the ``tokenize`` and ``convert_tokens_to_ids`` methods. num_tokens_to_remove (:obj:`int`, `optional`, defaults to 0): Number of tokens to remove using the truncation strategy. truncation (:obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): The strategy to follow for truncation. Can be: * :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (:obj:`int`, `optional`): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
stride (:obj:`int`, `optional`, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: :obj:`Tuple[List[int], List[int], List[int]]`: The truncated ``ids``, the truncated ``pair_ids`` and the list of overflowing tokens. """ if num_tokens_to_remove <= 0: return ids, pair_ids, [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] if truncation_strategy == TruncationStrategy.LONGEST_FIRST: for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): if not overflowing_tokens: window_len = min(len(ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(ids[-window_len:]) ids = ids[:-1] else: if not overflowing_tokens: window_len = min(len(pair_ids), stride + 1) else: window_len = 1 overflowing_tokens.extend(pair_ids[-window_len:]) pair_ids = pair_ids[:-1] elif truncation_strategy == TruncationStrategy.ONLY_FIRST: if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] ids = ids[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input" f"but the first sequence has a length {len(ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " f"for instance 'longest_first' or 'only_second'." ) elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input" f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " f"for instance 'longest_first' or 'only_first'." ) return (ids, pair_ids, overflowing_tokens) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined legnth or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). 
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if padding_strategy == PaddingStrategy.LONGEST: max_length = len(encoded_inputs["input_ids"]) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = ( padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs["input_ids"]) != max_length ) if needs_to_be_padded: difference = max_length - len(encoded_inputs["input_ids"]) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"]) if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) else: if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) return encoded_inputs def batch_decode( self, sequences: List[List[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True ) -> List[str]: """ Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (:obj:`List[List[int]]`): List of tokenized input ids. Can be obtained using the ``__call__`` method. skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to clean up the tokenization spaces. Returns: :obj:`List[str]`: The list of decoded sentences. """ return [ self.decode( seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces ) for seq in sequences ] def decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``. Args: token_ids (:obj:`List[int]`): List of tokenized input ids. Can be obtained using the ``__call__`` method. skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. 
            clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to clean up the tokenization spaces.

        Returns:
            :obj:`str`: The decoded sentence.
        """
        raise NotImplementedError

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids of the first sequence.
            token_ids_1 (:obj:`List[int]`, `optional`):
                List of ids of the second sequence.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        assert already_has_special_tokens and token_ids_1 is None, (
            "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
            "Please use a slow (full python) tokenizer to activate this argument. "
            "Or set `return_special_token_mask=True` when calling the encoding method "
            "to get the special tokens mask in any tokenizer. "
        )
        all_special_ids = self.all_special_ids  # cache the property
        special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
        return special_tokens_mask

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """
        Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.

        Args:
            out_string (:obj:`str`): The text to clean up.

        Returns:
            :obj:`str`: The cleaned-up string.
        """
        out_string = (
            out_string.replace(" .", ".")
            .replace(" ?", "?")
            .replace(" !", "!")
            .replace(" ,", ",")
            .replace(" ' ", "'")
            .replace(" n't", "n't")
            .replace(" 'm", "'m")
            .replace(" 's", "'s")
            .replace(" 've", "'ve")
            .replace(" 're", "'re")
        )
        return out_string
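

# Illustrative helper (hypothetical, not part of the class above): the ``pad`` method is designed so that it
# can also be used as a ``collate_fn`` for a PyTorch ``DataLoader``. The sketch assumes ``tokenizer`` is an
# instance of a concrete subclass with a padding token defined, and that torch is available for "pt" tensors.
def _example_pad_collate_fn(tokenizer: "PreTrainedTokenizerBase", features: List[Dict[str, List[int]]]) -> BatchEncoding:
    """Pad a list of un-padded encodings (as produced by ``__call__``) into a single batched ``BatchEncoding``."""
    return tokenizer.pad(features, padding=True, return_tensors="pt")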
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_utils_base.py
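The padding branch in tokenization_utils_base.py above pads input_ids, attention_mask, token_type_ids and special_tokens_mask on either the right or the left depending on `padding_side`. Below is a minimal, self-contained sketch of that behaviour; the function name `pad_single` and the default pad ids are illustrative assumptions, not values taken from the file.

# Standalone sketch (assumed names and ids) of the right/left padding logic described above.
from typing import Dict, List

def pad_single(encoded: Dict[str, List[int]],
               max_length: int,
               pad_token_id: int = 1,        # placeholder pad id
               pad_token_type_id: int = 0,   # placeholder pad token type id
               padding_side: str = "right") -> Dict[str, List[int]]:
    seq_len = len(encoded["input_ids"])
    difference = max_length - seq_len
    if difference <= 0:
        # Nothing to pad: the attention mask is all ones.
        encoded["attention_mask"] = [1] * seq_len
        return encoded
    if padding_side == "right":
        encoded["attention_mask"] = [1] * seq_len + [0] * difference
        if "token_type_ids" in encoded:
            encoded["token_type_ids"] = encoded["token_type_ids"] + [pad_token_type_id] * difference
        encoded["input_ids"] = encoded["input_ids"] + [pad_token_id] * difference
    elif padding_side == "left":
        encoded["attention_mask"] = [0] * difference + [1] * seq_len
        if "token_type_ids" in encoded:
            encoded["token_type_ids"] = [pad_token_type_id] * difference + encoded["token_type_ids"]
        encoded["input_ids"] = [pad_token_id] * difference + encoded["input_ids"]
    else:
        raise ValueError("Invalid padding side: " + str(padding_side))
    return encoded

# Pad a 3-token sequence to length 5 on the right.
print(pad_single({"input_ids": [0, 8, 2]}, max_length=5))
# -> {'input_ids': [0, 8, 2, 1, 1], 'attention_mask': [1, 1, 1, 0, 0]}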
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import List, Optional from bart.tokenization.tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast from bart.tokenization.tokenization_utils_base import BatchEncoding logger = logging.getLogger(__name__) # vocab and merges same as roberta vocab_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json" merges_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt" _all_bart_models = [ "facebook/bart-base", "facebook/bart-large", "facebook/bart-large-mnli", "facebook/bart-large-cnn", "facebook/bart-large-xsum", "yjernite/bart_eli5", ] class BartTokenizer(RobertaTokenizer): # merges and vocab same as Roberta max_model_input_sizes = {m: 1024 for m in _all_bart_models} pretrained_vocab_files_map = { "vocab_file": {m: vocab_url for m in _all_bart_models}, "merges_file": {m: merges_url for m in _all_bart_models}, } def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = "None", truncation=True, **kwargs, ) -> BatchEncoding: r""" Prepare a batch that can be passed directly to an instance of :class:`~transformers.BartModel`. Args: src_texts: (:obj:`List[str]`): List of documents to summarize or source language texts. tgt_texts: (:obj:`List[str]`, `optional`): List of summaries or target language texts. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts). If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). 
return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to :obj:`self.__call__`. Returns: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **decoder_input_ids** -- List of token ids to be fed to the decoder. - **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder. This does not include causal mask, which is built by the model. The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. 
""" if max_length is None: max_length = self.model_max_length model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length decoder_inputs: BatchEncoding = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) for k, v in decoder_inputs.items(): model_inputs[f"decoder_{k}"] = v return model_inputs class BartTokenizerFast(RobertaTokenizerFast): # merges and vocab same as Roberta max_model_input_sizes = {m: 1024 for m in _all_bart_models} pretrained_vocab_files_map = { "vocab_file": {m: vocab_url for m in _all_bart_models}, "merges_file": {m: merges_url for m in _all_bart_models}, } def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = "None", truncation=True, **kwargs, ) -> BatchEncoding: r""" Prepare a batch that can be passed directly to an instance of :class:`~transformers.BartModel`. Args: src_texts: (:obj:`List[str]`): List of documents to summarize or source language texts. tgt_texts: (:obj:`List[str]`, `optional`): List of summaries or target language texts. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts). If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to :obj:`self.__call__`. Returns: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **decoder_input_ids** -- List of token ids to be fed to the decoder. - **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder. This does not include causal mask, which is built by the model. The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ if max_length is None: max_length = self.model_max_length model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length decoder_inputs: BatchEncoding = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) for k, v in decoder_inputs.items(): model_inputs[f"decoder_{k}"] = v return model_inputs
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_bart.py
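A hedged usage sketch for `prepare_seq2seq_batch` as documented above. The checkpoint name, the example texts, and the assumption that the standard `from_pretrained` loader is available in this repository layout are illustrative, not taken from the file.

# Illustrative only: checkpoint name and texts are placeholders.
from bart.tokenization.tokenization_bart import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")

batch = tokenizer.prepare_seq2seq_batch(
    src_texts=["A long article to be summarized."],
    tgt_texts=["A short summary."],
    max_length=1024,        # encoder-side cap
    max_target_length=142,  # decoder-side cap
    padding="longest",
    return_tensors="pt",
)

# Per the docstring, decoder_input_ids / decoder_attention_mask are present only
# because tgt_texts was passed; otherwise only the encoder-side keys are returned.
print(list(batch.keys()))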
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/tokenization/__init__.py
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for XLNet model.""" import os import unicodedata from shutil import copyfile from typing import List, Optional from bart.tokenization.tokenization_utils import PreTrainedTokenizer from utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-spiece.model", "xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-spiece.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "xlnet-base-cased": None, "xlnet-large-cased": None, } SPIECE_UNDERLINE = "▁" # Segments (not really needed) SEG_ID_A = 0 SEG_ID_B = 1 SEG_ID_CLS = 2 SEG_ID_SEP = 3 SEG_ID_PAD = 4 class XLNetTokenizer(PreTrainedTokenizer): """ Constructs an XLNet tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__ This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`string`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase the input when tokenizing. remove_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to keep accents when tokenizing. bos_token (:obj:`string`, `optional`, defaults to "<s>"): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. eos_token (:obj:`string`, `optional`, defaults to "</s>"): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`string`, `optional`, defaults to "<unk>"): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`string`, `optional`, defaults to "<sep>"): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. 
pad_token (:obj:`string`, `optional`, defaults to "<pad>"): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`string`, `optional`, defaults to "<cls>"): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`string`, `optional`, defaults to "<mask>"): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<eop>", "<eod>"]`): Additional special tokens used by the tokenizer. Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES padding_side = "left" def __init__( self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs ): super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, ) self._pad_token_type_id = 3 try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece" "pip install sentencepiece" ) raise self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece" "pip install sentencepiece" ) raise self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def preprocess_text(self, inputs): if self.remove_space: outputs = " ".join(inputs.strip().split()) else: outputs = inputs outputs = outputs.replace("``", '"').replace("''", '"') if not self.keep_accents: outputs = unicodedata.normalize("NFKD", outputs) outputs = "".join([c for c in outputs if not unicodedata.combining(c)]) if self.do_lower_case: outputs = outputs.lower() return outputs def _tokenize(self, text, sample=False): """ Tokenize a string. 
""" text = self.preprocess_text(text) if not sample: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) new_pieces = [] for piece in pieces: if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit(): cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, "")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) return new_pieces def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format: - single sequence: ``X <sep> <cls>`` - pair of sequences: ``A <sep> B <sep> <cls>`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return token_ids_0 + sep + cls return token_ids_0 + sep + token_ids_1 + sep + cls def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` methods. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1] return ([0] * len(token_ids_0)) + [1, 1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. 
An XLNet sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 2 | first sequence | second sequence | CLS segment ID if token_ids_1 is None, only returns the first portion of the mask (0's). Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls_segment_id = [2] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] + cls_segment_id return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id def save_vocabulary(self, save_directory): """ Save the sentencepiece vocabulary (copy original file) and special tokens file to a directory. Args: save_directory (:obj:`str`): The directory in which to save the vocabulary. Returns: :obj:`Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_xlnet.py
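The XLNet tokenizer above builds inputs as ``X <sep> <cls>`` for a single sequence or ``A <sep> B <sep> <cls>`` for a pair, and assigns segment ids 0/1/2. The sketch below mirrors that layout with plain lists so it runs without SentencePiece; the <sep>/<cls> ids used here are placeholders, not the real vocabulary values.

# Minimal sketch of the XLNet input layout documented above; ids 4 and 3 are placeholders.
from typing import List, Optional

SEP_ID, CLS_ID = 4, 3  # placeholder ids for <sep> and <cls>

def build_inputs(ids_0: List[int], ids_1: Optional[List[int]] = None) -> List[int]:
    # single sequence: X <sep> <cls>; pair: A <sep> B <sep> <cls>
    if ids_1 is None:
        return ids_0 + [SEP_ID] + [CLS_ID]
    return ids_0 + [SEP_ID] + ids_1 + [SEP_ID] + [CLS_ID]

def token_type_ids(ids_0: List[int], ids_1: Optional[List[int]] = None) -> List[int]:
    # segment 0 for the first sequence and its <sep>, 1 for the second, 2 for <cls>
    if ids_1 is None:
        return [0] * (len(ids_0) + 1) + [2]
    return [0] * (len(ids_0) + 1) + [1] * (len(ids_1) + 1) + [2]

a, b = [10, 11], [20, 21, 22]
print(build_inputs(a, b))    # [10, 11, 4, 20, 21, 22, 4, 3]
print(token_type_ids(a, b))  # [0, 0, 0, 1, 1, 1, 1, 2]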
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for fast tokenizers (provided by HuggingFace's tokenizers library). For slow (python) tokenizers see tokenization_utils.py """ import logging import os from collections import defaultdict from typing import Any, Dict, List, Optional, Tuple, Union from tokenizers import Encoding as EncodingFast from tokenizers.decoders import Decoder as DecoderFast from tokenizers.implementations import BaseTokenizer as BaseTokenizerFast from utils.file_utils import add_end_docstrings from bart.tokenization.tokenization_utils_base import ( INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, PaddingStrategy, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) logger = logging.getLogger(__name__) @add_end_docstrings( INIT_TOKENIZER_DOCSTRING, """ .. automethod:: __call__ """, ) class PreTrainedTokenizerFast(PreTrainedTokenizerBase): """ Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). Inherits from :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase`. Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary. This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). """ def __init__(self, tokenizer: BaseTokenizerFast, **kwargs): if not isinstance(tokenizer, BaseTokenizerFast): raise ValueError( "Tokenizer should be an instance of a BaseTokenizer " "provided by HuggingFace tokenizers library." ) self._tokenizer: BaseTokenizerFast = tokenizer # We call this after having initialized the backend tokenizer because we update it. super().__init__(**kwargs) @property def is_fast(self) -> bool: return True @property def vocab_size(self) -> int: """ :obj:`int`: Size of the base vocabulary (without the added tokens). """ return self._tokenizer.get_vocab_size(with_added_tokens=False) def get_vocab(self) -> Dict[str, int]: """ Returns the vocabulary as a dictionary of token to index. :obj:`tokenizer.get_vocab()[token]` is equivalent to :obj:`tokenizer.convert_tokens_to_ids(token)` when :obj:`token` is in the vocab. Returns: :obj:`Dict[str, int]`: The vocabulary. """ return self._tokenizer.get_vocab(with_added_tokens=True) def get_added_vocab(self) -> Dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Returns: :obj:`Dict[str, int]`: The added tokens. 
""" base_vocab = self._tokenizer.get_vocab(with_added_tokens=False) full_vocab = self._tokenizer.get_vocab(with_added_tokens=True) added_vocab = dict((tok, index) for tok, index in full_vocab.items() if tok not in base_vocab) return added_vocab def __len__(self) -> int: """ Size of the full vocabulary with the added tokens. """ return self._tokenizer.get_vocab_size(with_added_tokens=True) @property def backend_tokenizer(self) -> BaseTokenizerFast: """ :obj:`tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend. """ return self._tokenizer @property def decoder(self) -> DecoderFast: """ :obj:`tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer. """ return self._tokenizer._tokenizer.decoder def _convert_encoding( self, encoding: EncodingFast, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> Dict[str, Any]: """ Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict. Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are lists (overflows) of lists (tokens). Output shape: (overflows, sequence length) """ if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_overflowing_tokens and encoding.overflowing is not None: encodings = [encoding] + encoding.overflowing else: encodings = [encoding] encoding_dict = defaultdict(list) for e in encodings: encoding_dict["input_ids"].append(e.ids) if return_token_type_ids: encoding_dict["token_type_ids"].append(e.type_ids) if return_attention_mask: encoding_dict["attention_mask"].append(e.attention_mask) if return_special_tokens_mask: encoding_dict["special_tokens_mask"].append(e.special_tokens_mask) if return_offsets_mapping: encoding_dict["offset_mapping"].append(e.offsets) if return_length: encoding_dict["length"].append(len(e.ids)) return encoding_dict def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary. Args: token (:obj:`str` or :obj:`List[str]`): One or several token(s) to convert to token id(s). Returns: :obj:`int` or :obj:`List[int]`: The token id or list of token ids. """ if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token: str) -> int: index = self._tokenizer.token_to_id(token) if index is None: return self.unk_token_id return index def _convert_id_to_token(self, index: int) -> Optional[str]: return self._tokenizer.id_to_token(int(index)) def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int: if special_tokens: return self._tokenizer.add_special_tokens(new_tokens) return self._tokenizer.add_tokens(new_tokens) def num_special_tokens_to_add(self, pair: bool = False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. .. 
note:: This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. Args: pair (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: :obj:`int`: Number of special tokens added to sequences. """ return self._tokenizer.num_special_tokens_to_add(pair) def convert_ids_to_tokens( self, ids: Union[int, List[int]], skip_special_tokens: bool = False ) -> Union[str, List[str]]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (:obj:`int` or :obj:`List[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. Returns: :obj:`str` or :obj:`List[str]`: The decoded token(s). """ if isinstance(ids, int): return self._tokenizer.id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue tokens.append(self._tokenizer.id_to_token(index)) return tokens def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False) -> List[str]: """ Converts a string in a sequence of tokens, using the backend Rust tokenizer. Args: text (:obj:`str`): The sequence to be encoded. pair (:obj:`str`, `optional`): A second sequence to be encoded with the first. add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to add the special tokens associated with the corresponding model. Returns: :obj:`List[str]`: The list of tokens. """ return self._tokenizer.encode(text, pair, add_special_tokens=add_special_tokens).tokens def set_truncation_and_padding( self, padding_strategy: PaddingStrategy, truncation_strategy: TruncationStrategy, max_length: int, stride: int, pad_to_multiple_of: Optional[int], ): """ Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards. The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed section. Args: padding_strategy (:class:`~transformers.tokenization_utils_base.PaddingStrategy`): The kind of padding that will be applied to the input truncation_strategy (:class:`~transformers.tokenization_utils_base.TruncationStrategy`): The kind of truncation that will be applied to the input max_length (:obj:`int`): The maximum size of a sequence. stride (:obj:`int`): The stride to use when handling overflow. pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" # Set truncation and padding on the backend tokenizer if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE: self._tokenizer.enable_truncation(max_length, stride=stride, strategy=truncation_strategy.value) else: self._tokenizer.no_truncation() if padding_strategy != PaddingStrategy.DO_NOT_PAD: self._tokenizer.enable_padding( length=max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None, direction=self.padding_side, pad_id=self.pad_token_id, pad_type_id=self.pad_token_type_id, pad_token=self.pad_token, pad_to_multiple_of=pad_to_multiple_of, ) else: self._tokenizer.no_padding() def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise ValueError( "batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs)) ) if kwargs: raise ValueError(f"Keyword arguments {kwargs} not recognized.") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, ) # Avoid thread overhead if only one example. 
if len(batch_text_or_text_pairs) == 1: if isinstance(batch_text_or_text_pairs[0], tuple): # We got a Tuple with a pair of sequences encodings = self._tokenizer.encode( *batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens, is_pretokenized=is_pretokenized, ) else: # We got a single sequence encodings = self._tokenizer.encode( batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens, is_pretokenized=is_pretokenized, ) encodings = [encodings] else: encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=is_pretokenized ) # Convert encoding to dict # `Tokens` has type: List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]] # with nested dimensions corresponding to batch, overflows, sequence length tokens = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] sanitized = {} for key in tokens[0].keys(): # To List[List[List[int]]] of shape (batch, overflows, sequence length) stack = [e for item in tokens for e in item[key]] sanitized[key] = stack # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, enc in enumerate(tokens): overflow_to_sample_mapping += [i] * len(enc["input_ids"]) sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping return BatchEncoding(sanitized, encodings, tensor_type=return_tensors) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_pretokenized: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs ) -> BatchEncoding: batched_input = [(text, text_pair)] if text_pair else [text] batched_output = self._batch_encode_plus( batched_input, is_pretokenized=is_pretokenized, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overfolwing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and 
isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) return batched_output def decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True ) -> str: """ Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``. Args: token_ids (:obj:`List[int]`): List of tokenized input ids. Can be obtained using the ``__call__`` method. skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to clean up the tokenization spaces. Returns: :obj:`str`: The decoded sentence. """ text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str) -> Tuple[str]: """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens and special token mappings. .. warning:: Please use :meth:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full tokenizer state if you want to reload it using the :meth:`~transformers.PreTrainedTokenizer.from_pretrained` class method. Args: save_directory (:obj:`str`): The path to a directory where the tokenizer will be saved. Returns: A tuple of :obj:`str`: The files saved. """ if os.path.isdir(save_directory): files = self._tokenizer.save_model(save_directory) else: folder, file = os.path.split(os.path.abspath(save_directory)) files = self._tokenizer.save_model(folder, name=file) return tuple(files)
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/tokenization/tokenization_utils_fast.py
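A small standalone sketch of the `overflow_to_sample_mapping` bookkeeping performed in `_batch_encode_plus` above: per-sample outputs (possibly split into several overflow chunks) are flattened into batch-shaped lists, and each chunk is mapped back to the index of the sample it came from. The helper name and the toy data below are illustrative, not taken from the file.

# Sketch of flattening per-sample overflow chunks and building the sample mapping.
from typing import Dict, List

def flatten_with_mapping(per_sample: List[Dict[str, List[List[int]]]]) -> Dict[str, list]:
    sanitized: Dict[str, list] = {}
    for key in per_sample[0].keys():
        # Flatten (samples, overflows, seq_len) into (batch, seq_len).
        sanitized[key] = [chunk for item in per_sample for chunk in item[key]]
    mapping: List[int] = []
    for i, item in enumerate(per_sample):
        # One entry per overflow chunk, pointing back at the originating sample.
        mapping += [i] * len(item["input_ids"])
    sanitized["overflow_to_sample_mapping"] = mapping
    return sanitized

# Sample 0 overflowed into two chunks, sample 1 fits in one.
tokens = [
    {"input_ids": [[1, 2, 3], [4, 5]], "attention_mask": [[1, 1, 1], [1, 1]]},
    {"input_ids": [[6, 7]], "attention_mask": [[1, 1]]},
]
print(flatten_with_mapping(tokens)["overflow_to_sample_mapping"])  # [0, 0, 1]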
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from dataclasses import dataclass from typing import List, Optional, Tuple import torch from utils.file_utils import ModelOutput @dataclass class BaseModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseModelOutputWithPooling(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseModelOutputWithPast(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size, 1, hidden_size)` is output. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if ``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if ``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseModelOutputWithCrossAttentions(ModelOutput): """ Base class for model's outputs, with potential hidden states and attentions. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if ``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if ``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. """ last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BaseModelOutputWithPastAndCrossAttentions(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size, 1, hidden_size)` is output. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if ``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if ``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Seq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size, 1, hidden_size)` is output. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class CausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Language modeling loss (for next-token prediction). logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class CausalLMOutputWithPast(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Language modeling loss (for next-token prediction). logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class CausalLMOutputWithCrossAttentions(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Language modeling loss (for next-token prediction). logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Cross attentions weights after the attention softmax, used to compute the weighted average in the cross-attention heads. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`torch.FloatTensor` tuples of length :obj:`config.n_layers`, with each tuple containing the cached key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder setting. Only relevant if ``config.is_decoder = True``. Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class SequenceClassifierOutputWithPast(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (:obj:`tuple(tupel(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Masked language modeling (MLM) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Seq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Language modeling loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class NextSentencePredictorOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided): Next sequence prediction (classification) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class SequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Seq2SeqSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence sentence classification models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder of the model. 
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice models. Args: loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class TokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class QuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Seq2SeqQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of sequence-to-sequence question answering models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-end scores (before SoftMax). past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. 
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
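A minimal usage sketch of the field conventions documented above, using the repository's own Seq2SeqLMOutput dataclass. It assumes the bart package from this repo is importable; all tensor sizes are arbitrary example values, and the four-tensors-per-layer cache layout simply mirrors the past_key_values docstrings, so treat it as illustrative rather than normative.
import torch
from bart.modeling.modeling_outputs import Seq2SeqLMOutput

batch_size, num_heads, tgt_len, src_len, head_dim, vocab_size, n_layers = 2, 16, 7, 11, 64, 50265, 12

# Per layer (as described above): 2 self-attention tensors of shape
# (batch_size, num_heads, sequence_length, embed_size_per_head) plus
# 2 cross-attention tensors with the encoder sequence length instead.
past_key_values = tuple(
    (
        torch.zeros(batch_size, num_heads, tgt_len, head_dim),
        torch.zeros(batch_size, num_heads, tgt_len, head_dim),
        torch.zeros(batch_size, num_heads, src_len, head_dim),
        torch.zeros(batch_size, num_heads, src_len, head_dim),
    )
    for _ in range(n_layers)
)

outputs = Seq2SeqLMOutput(
    logits=torch.zeros(batch_size, tgt_len, vocab_size),
    past_key_values=past_key_values,
)

assert outputs.logits.shape == (batch_size, tgt_len, vocab_size)  # attribute access
assert len(outputs.past_key_values) == n_layers
assert outputs.loss is None  # optional fields default to None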
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/modeling/modeling_outputs.py
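Before the model implementation that follows, a hedged sketch of the incremental-decoding pattern the past_key_values docstrings above describe: after the first step, only the newest decoder token is fed in together with the returned cache. The fake_forward function below is a hypothetical stand-in, not an API of this repository; it only mimics the documented shapes so the loop structure is visible.
import torch

def fake_forward(decoder_input_ids, past_key_values=None):
    # Hypothetical stand-in for a seq2seq forward pass with use_cache=True:
    # returns logits for the tokens just processed and a cache whose cached
    # sequence length grows by that many tokens (shapes follow the docstrings above).
    prev_len = 0 if past_key_values is None else past_key_values[0][0].shape[2]
    new_len = prev_len + decoder_input_ids.shape[1]
    cache = tuple(
        (torch.zeros(1, 16, new_len, 64), torch.zeros(1, 16, new_len, 64))
        for _ in range(12)
    )
    logits = torch.zeros(1, decoder_input_ids.shape[1], 50265)
    return logits, cache

decoder_input_ids = torch.tensor([[2]])  # arbitrary decoder start token id
past = None
for _ in range(3):
    # With a cache, only the last decoder token needs to be passed in.
    logits, past = fake_forward(decoder_input_ids[:, -1:], past_key_values=past)
    next_token = logits[:, -1].argmax(dim=-1, keepdim=True)
    decoder_input_ids = torch.cat([decoder_input_ids, next_token], dim=1)

assert past[0][0].shape[2] == 3  # cached length equals the number of decoded steps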
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BART model, ported from the fairseq repo.""" import logging import math import random import warnings from typing import Dict, List, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import CrossEntropyLoss from utils.activations import ACT2FN from bart.configuration.configuration_bart import BartConfig from utils.file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from bart.modeling.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from bart.modeling.modeling_utils import PreTrainedModel logger = logging.getLogger(__name__) _CONFIG_FOR_DOC = "BartConfig" _TOKENIZER_FOR_DOC = "BartTokenizer" BART_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/bart-large", # See all BART models at https://huggingface.co/models?filter=bart ] def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), float("-inf")) mask_cond = torch.arange(mask.size(-1)) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True): ''' if torch.cuda.is_available(): try: from apex.normalization import FusedLayerNorm return FusedLayerNorm(normalized_shape, eps, elementwise_affine) except ImportError: pass ''' return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) class BartLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): assert padding_idx is not None, "`padding_idx` should not be None, but of type int" # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models dont have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, padding_idx=padding_idx) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions + self.offset) class BartAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})." 
self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) assert attn_weights.size() == ( bsz * self.num_heads, tgt_len, src_len, ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" if attention_mask is not None: assert attention_mask.size() == ( bsz, 1, tgt_len, src_len, ), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask.to(attn_weights.dtype) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1, dtype=attn_weights.dtype) if layer_head_mask is not None: assert layer_head_mask.size() == ( self.num_heads, ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) assert attn_output.size() == ( bsz * self.num_heads, tgt_len, self.head_dim, ), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}" attn_output = ( attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) .transpose(1, 2) .reshape(bsz, tgt_len, embed_dim) ) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class BartEncoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BartAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.pre_ln = config.pre_ln def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ): """ Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, 
tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. """ residual = hidden_states if self.pre_ln: dtype = hidden_states.dtype hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states else: hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) dtype = hidden_states.dtype hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class BartDecoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BartAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.encoder_attn = BartAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.pre_ln = config.pre_ln def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, encoder_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: 
Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ): """ Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of size `(config.encoder_attention_heads,)`. past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple if self.pre_ln: dtype = hidden_states.dtype hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states = self.encoder_attn_layer_norm(hidden_states).to(dtype) hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states else: hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, 
attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) dtype = hidden_states.dtype hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states).to(dtype) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class BartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class BartPretrainedModel(PreTrainedModel): config_class = BartConfig base_model_prefix = "model" def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs class PretrainedBartModel(BartPretrainedModel): def __init_subclass__(self): warnings.warn( "The class `PretrainedBartModel` has been depreciated, please use `BartPretrainedModel` instead.", FutureWarning, ) BART_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BART_GENERATION_EXAMPLE = r""" Summarization example:: >>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt') >>> # Generate Summary >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True) >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]) Mask filling example:: >>> from transformers import BartTokenizer, BartForConditionalGeneration >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') >>> TXT = "My friends are <mask> but they eat too many carbs." >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids'] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() """ BART_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ Bart uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`). 
For translation and summarization training, :obj:`decoder_input_ids` should be provided. If no :obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy. head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`): Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ class BartEncoder(BartPretrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a :class:`BartEncoderLayer`. Args: config: BartConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.cast_dtype = config.dtype self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = BartLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, self.padding_idx, ) self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = LayerNorm(embed_dim) self.pre_ln = config.pre_ln if self.pre_ln: self.last_layernorm = LayerNorm(embed_dim) self.init_weights() def forward( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) if self.cast_dtype: attention_mask = attention_mask.to(self.cast_dtype) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." 
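        # Descriptive note (added comment, not in the original source): the loop
        # below applies each BartEncoderLayer in sequence. During training,
        # LayerDrop skips a layer with probability `self.layerdrop` (its outputs
        # are recorded as (None, None)), and when `config.gradient_checkpointing`
        # is enabled the layer is run through torch.utils.checkpoint so its
        # activations are recomputed in the backward pass to save memory.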
for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if getattr(self.config, "gradient_checkpointing", False) and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.pre_ln: hidden_states = self.last_layernorm(hidden_states) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class BartDecoder(BartPretrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`BartDecoderLayer` Args: config: BartConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.cast_dtype = config.dtype self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = BartLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, self.padding_idx, ) self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = LayerNorm(config.d_model) self.pre_ln = config.pre_ln if self.pre_ln: self.last_layernorm = LayerNorm(config.d_model) self.init_weights() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length ).to(self.device) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, 
encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. 
return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) if attention_mask is not None: attention_mask = attention_mask.to(self.cast_dtype) assert encoder_hidden_states.dtype==self.cast_dtype encoder_attention_mask = encoder_attention_mask.to(self.cast_dtype) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, use_cache) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, encoder_head_mask[idx] if encoder_head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if self.pre_ln: hidden_states = self.last_layernorm(hidden_states) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare BART Model outputting raw hidden-states without any specific head on top.", BART_START_DOCSTRING, ) class BartModel(BartPretrainedModel): def __init__(self, config: BartConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = BartEncoder(config, self.shared) self.decoder = BartDecoder(config, self.shared) self.init_weights() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # different to other models, Bart automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The BART Model with a language modeling head. 
Can be used for summarization.", BART_START_DOCSTRING ) class BartForConditionalGeneration(BartPretrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"final_logits_bias", r"encoder\.version", r"decoder\.version", r"lm_head\.weight", ] def __init__(self, config: BartConfig): super().__init__(config) self.model = BartModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.init_weights() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BART_GENERATION_EXAMPLE) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. 
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past=None, attention_mask=None, head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( """ Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BART_START_DOCSTRING, ) class BartForSequenceClassification(BartPretrainedModel): def __init__(self, config: BartConfig, **kwargs): super().__init__(config, **kwargs) self.model = BartModel(config) self.classification_head = BartClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) self.model._init_weights(self.classification_head.dense) self.model._init_weights(self.classification_head.out_proj) @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id) if len(torch.unique(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", BART_START_DOCSTRING, ) class BartForQuestionAnswering(BartPretrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = BartModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.model._init_weights(self.qa_outputs) @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, start_positions=None, end_positions=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, 
cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) class BartDecoderWrapper(BartPretrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the :class:`~transformers.EncoderDecoderModel` framework. """ def __init__(self, config): super().__init__(config) self.decoder = BartDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class BartForCausalLM(BartPretrainedModel): def __init__(self, config): super().__init__(config) config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False self.model = BartDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.init_weights() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. 
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. Returns: Example:: >>> from transformers import BartTokenizer, BartForCausalLM >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') >>> model = BartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_head_mask=encoder_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past: input_ids = input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past, "use_cache": use_cache, } @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
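# ---------------------------------------------------------------------------
# Minimal fine-tuning sketch (illustrative addition, not part of the original
# module). It exercises the training path documented above: when `labels` is
# given and `decoder_input_ids` is None, forward() builds the decoder inputs
# with shift_tokens_right() and returns a CrossEntropyLoss over the vocabulary.
# The tokenizer/checkpoint names follow the docstring examples in this file;
# the toy source/target sentences are assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import BartTokenizer

    tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
    model = BartForConditionalGeneration.from_pretrained("facebook/bart-large")

    source = ["My friends are cool but they eat too many carbs."]
    target = ["My friends are cool."]
    batch = tokenizer(source, return_tensors="pt")
    labels = tokenizer(target, return_tensors="pt")["input_ids"]

    outputs = model(
        input_ids=batch["input_ids"],
        attention_mask=batch["attention_mask"],
        labels=labels,
        return_dict=True,
    )
    print(f"LM loss: {outputs.loss.item():.4f}")
    outputs.loss.backward()  # gradients for a single optimization step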
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/modeling/modeling_bart.py
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/modeling/__init__.py
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch T5 model. """ import copy import math import os import warnings import torch import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss from bart.configuration.configuration_t5 import T5Config from utils.file_utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from bart.modeling.modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput from bart.modeling.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### T5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", # See all T5 models at https://huggingface.co/models?filter=t5 ] #################################################### # This is a conversion method from TF 1.0 to PyTorch # More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 #################################################### def load_tf_weights_in_t5(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] tf_weights = {} for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) tf_weights[name] = array for txt_name in names: name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) tf_weights.pop(txt_name, None) continue if "_slot_" in name[-1]: logger.info("Skipping {}".format("/".join(name))) tf_weights.pop(txt_name, None) continue pointer = model array = tf_weights[txt_name] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") elif scope_names[0] == "self_attention": pointer = getattr(pointer, "layer") pointer = pointer[0] elif scope_names[0] == "enc_dec_attention": pointer = getattr(pointer, "layer") pointer = pointer[1] elif scope_names[0] == "dense_relu_dense": pointer = getattr(pointer, "layer") pointer = pointer[2] elif scope_names[0] == "rms_norm": if hasattr(pointer, "layer_norm"): pointer = getattr(pointer, "layer_norm") elif hasattr(pointer, "final_layer_norm"): pointer = getattr(pointer, "final_layer_norm") elif scope_names[0] == "scale": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") elif scope_names[0] == "decoder" and name[1] == "logits": continue elif scope_names[0] == "logits": pointer = getattr(pointer, "lm_head") elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit(): pointer = getattr(pointer, f"wi_{scope_names[1]}") continue else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if scope_names[0] not in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") if scope_names[0] != "embedding": logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, name)) array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array.astype(np.float32)) tf_weights.pop(txt_name, None) logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys()))) return model #################################################### # PyTorch Models are constructed by sub-classing # - torch.nn.Module for the layers and # - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module) #################################################### PARALLELIZE_DOCSTRING = r""" This is an experimental feature and is a subject to change at a moment's notice. 
Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices. Args: device_map (:obj:`Dict[int, list]`, optional, defaults to None): A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the t5 models have the following number of attention modules: - t5-small: 6 - t5-base: 12 - t5-large: 24 - t5-3b: 24 - t5-11b: 24 Example:: # Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map) """ DEPARALLELIZE_DOCSTRING = r""" Moves the model to cpu from a model parallel state. Example:: # On a 4 GPU machine with t5-3b: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() """ class T5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the T5 style No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # layer norm should always be calculated in float32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into float16 if necessary if self.weight.dtype == torch.float16: hidden_states = hidden_states.to(torch.float16) return self.weight * hidden_states class T5DenseReluDense(nn.Module): def __init__(self, config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = F.relu(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class T5DenseGatedGeluDense(nn.Module): def __init__(self, config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config): super().__init__() if config.feed_forward_proj == "relu": self.DenseReluDense = T5DenseReluDense(config) elif config.feed_forward_proj == "gated-gelu": self.DenseReluDense = T5DenseGatedGeluDense(config) else: raise ValueError( 
f"{self.config.feed_forward_proj} is not supported. Choose between `relu` and `gated-gelu`" ) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__(self, config: T5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_postion_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_postion_if_large = torch.min( relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_postion_if_large) return relative_buckets def compute_bias(self, query_length, key_length): """ Compute binned relative position bias """ context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, ) relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), "past_key_value should have 2 past states: keys and values. 
Got {} past states".format( len(past_key_value) ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """ projection """ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """ reshape """ return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """ projects hidden states correctly to key/query states """ if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) else: position_bias = self.compute_bias(real_seq_length, key_length) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights = F.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = F.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = 
self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(T5LayerCrossAttention(config)) self.layer.append(T5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: assert self.is_decoder, "Only decoder can use `past_key_values`" expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 error_message = "There should be {} past states. 
2 (past / key) for self attention.{} Got {} past key / value states".format( expected_num_past_key_values, "2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "", len(past_key_value), ) assert len(past_key_value) == expected_num_past_key_values, error_message self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] if torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) if torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) outputs = outputs + (present_key_value_state,) + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias) class T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = T5Config load_tf_weights = load_tf_weights_in_t5 base_model_prefix = "transformer" is_parallelizable = True @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """ Initialize the weights """ factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, T5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, T5DenseReluDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5DenseGatedGeluDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert ( decoder_start_token_id is not None ), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information" # shift inputs to the right shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." 
# replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values" return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map self.device_map = ( get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.block)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) # Load onto devices for k, v in self.device_map.items(): for layer in v: cuda_device = "cuda:" + str(k) self.block[layer] = self.block[layer].to(cuda_device) # Set embed_tokens to first layer self.embed_tokens = self.embed_tokens.to(self.first_device) # Set final layer norm to last device self.final_layer_norm = self.final_layer_norm.to(self.last_device) @add_start_docstrings(PARALLELIZE_DOCSTRING) def deparallelize(self): self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" for i in range(len(self.block)): self.block[i] = self.block[i].to("cpu") self.embed_tokens = self.embed_tokens.to("cpu") self.final_layer_norm = self.final_layer_norm.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, encoder_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel if self.model_parallel: torch.cuda.set_device(self.first_device) self.embed_tokens = self.embed_tokens.to(self.first_device) use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds") if inputs_embeds is 
None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format( self ) if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device) if self.is_decoder and encoder_attention_mask is not None: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) encoder_head_mask = self.get_head_mask(encoder_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] encoder_layer_head_mask = encoder_head_mask[i] # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if position_bias is not None: position_bias = position_bias.to(hidden_states.device) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) if encoder_extended_attention_mask is not None: encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) if encoder_decoder_position_bias is not None: encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) if layer_head_mask is not None: layer_head_mask = layer_head_mask.to(hidden_states.device) if encoder_layer_head_mask is not None: encoder_layer_head_mask = encoder_layer_head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, encoder_layer_head_mask=encoder_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # 
hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias) hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention weights), # (self-attention position bias), (cross-attention weights), (cross-attention position bias) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) T5_START_DOCSTRING = r""" The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ T5_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using :class:`~transformers.T5Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for detail. `What are input IDs? 
<../glossary.html#input-ids>`__ To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training <./t5.html#training>`__. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ T5 uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`). To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_input_ids` takes the value of :obj:`input_ids`. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. in the decoder Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`: `attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ T5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using :class:`~transformers.T5Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for detail. To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training <./t5.html#training>`__. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. 
return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ # Warning messafe for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-states" "without any specific head on top.", T5_START_DOCSTRING, ) class T5Model(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", ] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Example:: >>> from transformers import T5Tokenizer, T5Model >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5Model.from_pretrained('t5-small') >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, 
decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING) class T5ForConditionalGeneration(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", ] def __init__(self, config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.decoder.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Returns: Examples:: >>> from transformers import T5Tokenizer, T5ForConditionalGeneration >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5ForConditionalGeneration.from_pretrained('t5-small') >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids >>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model.generate(input_ids) """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # If decoding with past key value states, only the last tokens # should be given as an input if past_key_values is not None: assert labels is None, "Decoder should not use cached key value states when training." 
if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_inputs_embeds is not None: decoder_inputs_embeds = decoder_inputs_embeds[:, -1:] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.encoder.first_device) self.lm_head = self.lm_head.to(self.encoder.first_device) sequence_output = sequence_output.to(self.lm_head.weight.device) if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim ** -0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past reordered_decoder_past = () for layer_past_states in past: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the 
four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare T5 Model transformer outputting encoder's raw hidden-states" "without any specific head on top.", T5_START_DOCSTRING, ) class T5EncoderModel(T5PreTrainedModel): authorized_missing_keys = [ r"encoder\.embed_tokens\.weight", ] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.encoder = self.encoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Example:: >>> from transformers import T5Tokenizer, T5EncoderModel >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5EncoderModel.from_pretrained('t5-small') >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/modeling/modeling_t5.py
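# --- Editor's illustrative sketch (not part of the original files) ---
# T5Stack.forward above expands a (batch_size, key_length) padding mask into a
# broadcastable additive mask: 0.0 where attention is allowed and a large
# negative value where it is masked, so it can simply be added to the raw
# attention scores before the softmax. A minimal standalone version of that idea
# for the encoder-style (non-causal) case, with hypothetical names:
import torch


def extended_padding_mask(attention_mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    """Turn a (batch, key_len) 1/0 mask into a (batch, 1, 1, key_len) additive mask."""
    mask = attention_mask[:, None, None, :].to(dtype)
    return (1.0 - mask) * -10000.0


if __name__ == "__main__":
    pad_mask = torch.tensor([[1, 1, 1, 0]])  # last position is padding
    # Prints zeros for attended positions and -10000.0 at the padded position.
    print(extended_padding_mask(pad_mask))
# --- end of editor's sketch ---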
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import logging import os import re from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Set, Tuple, Union import torch from torch import Tensor, device, dtype, nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from utils.activations import get_activation from bart.configuration.configuration_utils import PretrainedConfig from utils.file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, cached_path, hf_bucket_url, is_remote_url, is_torch_tpu_available, replace_return_docstrings, ) from utils.generation_utils import GenerationMixin logger = logging.getLogger(__name__) try: from torch.nn import Identity except ImportError: # Older PyTorch compatibility class Identity(nn.Module): r"""A placeholder identity operator that is argument-insensitive. """ def __init__(self, *args, **kwargs): super().__init__() def forward(self, input): return input def find_pruneable_heads_and_indices( heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] ) -> Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: torch.LongTensor = torch.arange(len(mask))[mask].long() return heads, index class ModuleUtilsMixin: """ A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return only the number of trainable parameters Returns: :obj:`int`: The number of parameters. 
""" params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters() return sum(p.numel() for p in params) @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kwargs): try: import psutil except (ImportError): raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_pre_forward = mem.rss return None @staticmethod def _hook_rss_memory_post_forward(module, *args, **kwargs): try: import psutil except (ImportError): raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_post_forward = mem.rss mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) return None def add_memory_hooks(self): """ Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to zero with :obj:`model.reset_memory_hooks_state()`. """ for module in self.modules(): module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) module.register_forward_hook(self._hook_rss_memory_post_forward) self.reset_memory_hooks_state() def reset_memory_hooks_state(self): """ Reset the :obj:`mem_rss_diff` attribute of each module (see :func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`). """ for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self) -> device: """ :obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ try: return next(self.parameters()).device except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device @property def dtype(self) -> dtype: """ :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ try: return next(self.parameters()).dtype except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: """ Invert an attention mask (e.g., switches 0. and 1.). Args: encoder_attention_mask (:obj:`torch.Tensor`): An attention mask. Returns: :obj:`torch.Tensor`: The inverted attention mask. """ if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility if self.dtype == torch.float16: encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4 elif self.dtype == torch.float32: encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9 else: raise ValueError( "{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format( self.dtype ) ) return encoder_extended_attention_mask def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def get_head_mask( self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False ) -> Tensor: """ Prepare the head mask if needed. Args: head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (:obj:`int`): The number of hidden layers in the model. 
is_attention_chunked: (:obj:`bool`, `optional, defaults to :obj:`False`): Whether or not the attentions scores are computed by chunks or not. Returns: :obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with :obj:`[None]` for each layer. """ if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) if is_attention_chunked is True: head_mask = head_mask.unsqueeze(-1) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" head_mask = head_mask.to(dtype=self.dtype) # switch to fload if need + fp16 compatibility return head_mask class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin): r""" Base class for all models. :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: * resize the input embeddings, * prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. - **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments: - **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the TensorFlow checkpoint. - **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to the model. - **path** (:obj:`str`) -- A path to the TensorFlow checkpoint. - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **authorized_missing_keys** (:obj:`Optional[List[str]]`) -- A list of re pattern of tensor names to ignore when loading the model (and avoid unnecessary warnings). """ config_class = None base_model_prefix = "" authorized_missing_keys = None @property def dummy_inputs(self) -> Dict[str, torch.Tensor]: """ :obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. """ return {"input_ids": torch.tensor(DUMMY_INPUTS)} def __init__(self, config: PretrainedConfig, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ ) ) # Save config in model self.config = config @property def base_model(self) -> nn.Module: """ :obj:`torch.nn.Module`: The main body of the model. """ return getattr(self, self.base_model_prefix, self) def get_input_embeddings(self) -> nn.Module: """ Returns the model's input embeddings. 
Returns: :obj:`nn.Module`: A torch module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value: nn.Module): """ Set model's input embeddings Args: value (:obj:`nn.Module`): A module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self) -> nn.Module: """ Returns the model's output embeddings. Returns: :obj:`nn.Module`: A torch module mapping hidden states to vocabulary. """ return None # Overwrite for models with output embeddings def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. If the :obj:`torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) if self.config.is_encoder_decoder and self.config.tie_encoder_decoder: self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix) @staticmethod def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str): uninitialized_encoder_weights: List[str] = [] assert decoder.__class__ == encoder.__class__, f"{decoder.__class__} and {encoder.__class__} have to be equal." def tie_encoder_to_decoder_recursively( decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, uninitialized_encoder_weights: List[str], depth=0, ): assert isinstance(decoder_pointer, nn.Module) and isinstance( encoder_pointer, nn.Module ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module" if hasattr(decoder_pointer, "weight"): assert hasattr(encoder_pointer, "weight") encoder_pointer.weight = decoder_pointer.weight if hasattr(decoder_pointer, "bias"): assert hasattr(encoder_pointer, "bias") encoder_pointer.bias = decoder_pointer.bias return encoder_modules = encoder_pointer._modules decoder_modules = decoder_pointer._modules if len(decoder_modules) > 0: assert ( len(encoder_modules) > 0 ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}" all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()]) encoder_layer_pos = 0 for name, module in decoder_modules.items(): if name.isdigit(): encoder_name = str(int(name) + encoder_layer_pos) decoder_name = name if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])): # this can happen if the name corresponds to the position in a list module list of layers # in this case the decoder has added a cross-attention that the encoder does not have # thus skip this step and substract one layer pos from encoder encoder_layer_pos -= 1 continue elif name not in encoder_modules: continue elif depth > 500: raise ValueError( "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model." 
) else: decoder_name = encoder_name = name tie_encoder_to_decoder_recursively( decoder_modules[decoder_name], encoder_modules[encoder_name], module_name + "/" + name, uninitialized_encoder_weights, depth=depth + 1, ) all_encoder_weights.remove(module_name + "/" + encoder_name) uninitialized_encoder_weights += list(all_encoder_weights) # tie weights recursively tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights) if len(uninitialized_encoder_weights) > 0: logger.warning( f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}" ) def _tie_or_clone_weights(self, output_embeddings, input_embeddings): """ Tie or clone module weights depending of whether we are using TorchScript or not """ if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight if getattr(output_embeddings, "bias", None) is not None: output_embeddings.bias.data = torch.nn.functional.pad( output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],), "constant", 0, ) if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): output_embeddings.out_features = input_embeddings.num_embeddings def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding: """ Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method. Arguments: new_num_tokens (:obj:`int`, `optional`): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model wihtout doing anything. Return: :obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. """ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed model_embeds = base_model._resize_token_embeddings(new_num_tokens) if new_num_tokens is None: return model_embeds # Update base model and current model config self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens # Tie weights again if needed self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_embeddings( self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None ) -> torch.nn.Embedding: """ Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (:obj:`torch.nn.Embedding`): Old embeddings to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`torch.nn.Embedding`` module of the model wihtout doing anything. 
Return: :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if :obj:`new_num_tokens` is :obj:`None` """ if new_num_tokens is None: return old_embeddings old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens: return old_embeddings # Build new embeddings new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim) new_embeddings.to(old_embeddings.weight.device) # initialize all new embeddings (in particular added tokens) self._init_weights(new_embeddings) # Copy token embeddings from the previous weights num_tokens_to_copy = min(old_num_tokens, new_num_tokens) new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :] return new_embeddings def init_weights(self): """ Initializes and prunes weights if needed. """ # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) # Tie weights if needed self.tie_weights() def prune_heads(self, heads_to_prune: Dict[int, List[int]]): """ Prunes heads of the base model. Arguments: heads_to_prune (:obj:`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON self.base_model._prune_heads(heads_to_prune) def save_pretrained(self, save_directory): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method. Arguments: save_directory (:obj:`str`): Directory to which to save. Will be created if it doesn't exist. """ if os.path.isfile(save_directory): logger.error("Provided path ({}) should be a directory, not a file".format(save_directory)) return os.makedirs(save_directory, exist_ok=True) # Only save the model itself if we are using distributed training model_to_save = self.module if hasattr(self, "module") else self # Attach architecture to the config model_to_save.config.architectures = [model_to_save.__class__.__name__] # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, WEIGHTS_NAME) if getattr(self.config, "xla_device", False): import torch_xla.core.xla_model as xm if xm.is_master_ordinal(): # Save configuration file model_to_save.config.save_pretrained(save_directory) # xm.save takes care of saving only from master xm.save(model_to_save.state_dict(), output_model_file) else: model_to_save.config.save_pretrained(save_directory) torch.save(model_to_save.state_dict(), output_model_file) logger.info("Model weights saved in {}".format(output_model_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with ``model.train()``. 
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (:obj:`str`, `optional`): Can be either: - A string with the `shortcut name` of a pretrained model to load from cache or download, e.g., ``bert-base-uncased``. - A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g., ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``). model_args (sequence of positional arguments, `optional`): All remaning positional arguments will be passed to the underlying model's ``__init__`` method. config (:obj:`Union[PretrainedConfig, str]`, `optional`): Can be either: - an instance of a class derived from :class:`~transformers.PretrainedConfig`, - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`. Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the `shortcut name` string of a pretrained model). - The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - The model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`): A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir (:obj:`str`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`): Load the model weights from a TensorFlow checkpoint save file (see docstring of ``pretrained_model_name_or_path`` argument). force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. 
proxies (:obj:`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether ot not to also return a dictionnary containing missing keys, unexpected keys and error messages. local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to only look at local files (e.g., not try doanloading the model). use_cdn(:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to use Cloudfront (a Content Delivery Network, or CDN) when searching for the model on our S3 (faster). Should be set to :obj:`False` for checkpoints larger than 20GB. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attention=True`). Behaves differently depending on whether a ``config`` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: from transformers import BertConfig, BertModel # Download model and configuration from S3 and cache. model = BertModel.from_pretrained('bert-base-uncased') # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable). model = BertModel.from_pretrained('./test/saved_model/') # Update configuration during loading. model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). 
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_cdn = kwargs.pop("use_cdn", True) # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")): # Load from a TF 1.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError( "Error no file named {} found in directory {} or `from_tf` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path, ) ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): assert ( from_tf ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index" ) archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME), use_cdn=use_cdn, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) if resolved_archive_file is None: raise EnvironmentError except EnvironmentError: msg = ( f"Can't load weights for '{pretrained_model_name_or_path}'. 
Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. model = cls(config, *model_args, **model_kwargs) if state_dict is None and not from_tf: try: state_dict = torch.load(resolved_archive_file, map_location="cpu") if "state_dict" in state_dict.keys(): #Loading models that store optimizer states, etc along with state_dict in ckpt state_dict = state_dict["state_dict"] except Exception: raise OSError( "Unable to load weights from pytorch checkpoint file. " "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. " ) missing_keys = [] unexpected_keys = [] error_msgs = [] if from_tf: if resolved_archive_file.endswith(".index"): # Load from a TensorFlow 1.X checkpoint - provided by original authors model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' else: # Load from our TensorFlow 2.0 checkpoints try: from transformers import load_tf2_checkpoint_in_pytorch_model model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True) except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise else: # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if "model." in key: new_key = key.replace("model.", "") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module: nn.Module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs, ) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") # Make sure we are able to load base models as well as derived models (with heads) start_prefix = "" model_to_load = model has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()) if not hasattr(model, cls.base_model_prefix) and has_prefix_module: start_prefix = cls.base_model_prefix + "." 
if hasattr(model, cls.base_model_prefix) and not has_prefix_module: model_to_load = getattr(model, cls.base_model_prefix) load(model_to_load, prefix=start_prefix) if model.__class__.__name__ != model_to_load.__class__.__name__: base_model_state_dict = model_to_load.state_dict().keys() head_model_state_dict_without_base_prefix = [ key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys() ] missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict) # Some models may have keys that are not in the state by design, removing them before needlessly warning # the user. if cls.authorized_missing_keys is not None: for pat in cls.authorized_missing_keys: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when " f"initializing {model.__class__.__name__}: {unexpected_keys}\n" f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n" f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect " f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " f"and are newly initialized: {missing_keys}\n" f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n" f"If your task is similar to the task the model of the checkpoint was trained on, " f"you can already use {model.__class__.__name__} for predictions without further training." ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( model.__class__.__name__, "\n\t".join(error_msgs) ) ) # make sure token embedding weights are still tied if needed model.tie_weights() # Set model in evaluation mode to deactivate DropOut modules by default model.eval() if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs, } return model, loading_info if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available(): import torch_xla.core.xla_model as xm model = xm.send_cpu_data_to_device(model, xm.xla_device()) model.to(xm.xla_device()) return model class Conv1D(nn.Module): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (:obj:`int`): The number of output features. nx (:obj:`int`): The number of input features. 
""" def __init__(self, nf, nx): super().__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = nn.Parameter(w) self.bias = nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class PoolerStartLogits(nn.Module): """ Compute SQuAD start logits from sequence hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model. """ def __init__(self, config: PretrainedConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None ) -> torch.FloatTensor: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): The final hidden states of the model. p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. Returns: :obj:`torch.FloatTensor`: The start logits for SQuAD. """ x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if next(self.parameters()).dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerEndLogits(nn.Module): """ Compute SQuAD end logits from sequence hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the :obj:`layer_norm_eps` to use. """ def __init__(self, config: PretrainedConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`): The hidden states of the first tokens for the labeled span. start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): The position of the first token for the labeled span. p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. .. note:: One of ``start_states`` or ``start_positions`` should be not obj:`None`. If both are set, ``start_positions`` overrides ``start_states``. Returns: :obj:`torch.FloatTensor`: The end logits for SQuAD. 
""" assert ( start_states is not None or start_positions is not None ), "One of start_states, start_positions should be not None" if start_positions is not None: slen, hsz = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: if next(self.parameters()).dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerAnswerClass(nn.Module): """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model. """ def __init__(self, config): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`): The hidden states of the first tokens for the labeled span. start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): The position of the first token for the labeled span. cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token. .. note:: One of ``start_states`` or ``start_positions`` should be not obj:`None`. If both are set, ``start_positions`` overrides ``start_states``. Returns: :obj:`torch.FloatTensor`: The SQuAD 2.0 answer class. """ # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. hsz = hidden_states.shape[-1] assert ( start_states is not None or start_positions is not None ), "One of start_states, start_positions should be not None" if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) else: cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x @dataclass class SquadHeadOutput(ModelOutput): """ Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`. 
Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the ``is_impossible`` label of the answers. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None class SQuADHead(nn.Module): r""" A SQuAD head inspired by XLNet. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the :obj:`layer_norm_eps` to use. """ def __init__(self, config): super().__init__() self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig) def forward( self, hidden_states: torch.FloatTensor, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, is_impossible: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, return_dict: bool = False, ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): Final hidden states of the model on the sequence tokens. start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Positions of the first token for the labeled span. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Positions of the last token for the labeled span. cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token. 
is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Whether the question has a possible answer in the paragraph or not. p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return a :class:`~transformers.file_utils.ModelOuput` instead of a plain tuple. Returns: """ start_logits = self.start_logits(hidden_states, p_mask=p_mask) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) if not return_dict: return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) else: return SquadHeadOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, ) class SequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. 
Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are: - :obj:`"last"` -- Take the last token hidden state (like XLNet) - :obj:`"first"` -- Take the first token hidden state (like Bert) - :obj:`"mean"` -- Take the mean of all tokens hidden states - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - :obj:`"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`). - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the output, another string or :obj:`None` will add no activation. - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: PretrainedConfig): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = (get_activation(activation_string) if activation_string else Identity()) self.first_dropout = Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`): Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: :obj:`torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long,) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D: """ Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed. Used to remove heads. Args: layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices. Returns: :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if dim == 0: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_layer( layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None ) -> Union[torch.nn.Linear, Conv1D]: """ Prune a Conv1D or linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune. 
index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ if isinstance(layer, nn.Linear): return prune_linear_layer(layer, index, dim=0 if dim is None else dim) elif isinstance(layer, Conv1D): return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) else: raise ValueError("Can't prune layer of class {}".format(layer.__class__)) def apply_chunking_to_forward( forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors ) -> torch.Tensor: """ This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory. If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as directly applying :obj:`forward_fn` to :obj:`input_tensors`. Args: forward_fn (:obj:`Callable[..., torch.Tensor]`): The forward function of the model. chunk_size (:obj:`int`): The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`. chunk_dim (:obj:`int`): The dimension over which the :obj:`input_tensors` should be chunked. input_tensors (:obj:`Tuple[torch.Tensor]`): The input tensors of ``forward_fn`` which will be chunked. Returns: :obj:`torch.Tensor`: A tensor with the same shape as the :obj:`foward_fn` would have given if applied`. Examples:: # rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) """ assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors) tensor_shape = input_tensors[0].shape assert all( input_tensor.shape == tensor_shape for input_tensor in input_tensors ), "All input tenors have to be of the same shape" # inspect.signature exist since python 3.5 and is a python method -> no problem with backward compability num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters) assert num_args_in_forward_chunk_fn == len( input_tensors ), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format( num_args_in_forward_chunk_fn, len(input_tensors) ) if chunk_size > 0: assert ( input_tensors[0].shape[chunk_dim] % chunk_size == 0 ), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format( input_tensors[0].shape[chunk_dim], chunk_size ) num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size # chunk input tensor into tuples input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors) # apply forward fn to every tuple output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks)) # concatenate output at same dimension return torch.cat(output_chunks, dim=chunk_dim) return forward_fn(*input_tensors)
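

# Illustrative usage sketch (editorial addition, not part of the upstream file):
# how `apply_chunking_to_forward` can chunk a position-wise feed-forward layer
# over the sequence dimension to save memory. The layer, tensor sizes and
# chunk size below are hypothetical and chosen only for demonstration.
def _example_apply_chunking_to_forward():
    import torch
    from torch import nn

    ffn = nn.Linear(8, 8)

    def ffn_chunk(hidden_states):
        # Must be independent across the chunked dimension (the sequence
        # dimension here) for chunking to match the unchunked result.
        return ffn(hidden_states)

    hidden = torch.randn(2, 16, 8)  # (batch_size, seq_len, hidden_size)

    # Split seq_len=16 into chunks of 4 along dim=1, apply `ffn_chunk` to each
    # chunk, then concatenate the outputs back along dim=1.
    chunked = apply_chunking_to_forward(ffn_chunk, 4, 1, hidden)
    full = ffn_chunk(hidden)
    assert torch.allclose(chunked, full, atol=1e-6)
    return chunked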
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/modeling/modeling_utils.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class BertSelfAttention(nn.Module):
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        # The number of attention heads is taken from the config
        # (config.num_attention_heads), matching the standard BERT layout.
        num_heads = config.num_attention_heads
        if config.hidden_size % num_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, num_heads))
        self.num_heads = num_heads
        self.attention_head_size = int(config.hidden_size / num_heads)
        self.all_head_size = self.num_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)
        new_x_shape = x.size()[:-1] + (self.num_heads, self.attention_head_size)
        x = torch.reshape(x, new_x_shape)
        return x.permute(0, 2, 1, 3)

    def transpose_key_for_scores(self, x):
        # Keys are laid out as (batch, num_heads, head_size, seq_len) so that the
        # attention scores can be computed with a single matmul against the queries.
        new_x_shape = x.size()[:-1] + (self.num_heads, self.attention_head_size)
        x = torch.reshape(x, new_x_shape)
        return x.permute(0, 2, 3, 1)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_key_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer)
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function).
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = F.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = torch.reshape(context_layer, new_context_layer_shape)
        return context_layer
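

# Illustrative usage sketch (editorial addition, not part of the upstream file):
# driving BertSelfAttention with the additive mask convention produced by
# `get_extended_attention_mask` in modeling_utils.py (0.0 for positions to
# attend to, -10000.0 for masked positions). The config values, batch size and
# sequence length below are hypothetical.
def _example_bert_self_attention():
    from types import SimpleNamespace

    config = SimpleNamespace(
        hidden_size=16,
        num_attention_heads=4,
        attention_probs_dropout_prob=0.0,
    )
    attn = BertSelfAttention(config)

    batch_size, seq_len = 2, 6
    hidden_states = torch.randn(batch_size, seq_len, config.hidden_size)

    # Padding mask: 1 for real tokens, 0 for padding (last two positions here).
    padding_mask = torch.ones(batch_size, seq_len)
    padding_mask[:, -2:] = 0
    # Broadcastable additive mask of shape (batch_size, 1, 1, seq_len).
    extended_mask = (1.0 - padding_mask[:, None, None, :]) * -10000.0

    context = attn(hidden_states, extended_mask)
    assert context.shape == (batch_size, seq_len, config.hidden_size)
    return context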
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/bart/modeling/bert_attn.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for python and fast tokenizers. Fast tokenizers are provided by HuggingFace's tokenizers library.""" import copy import functools import itertools import json import logging import operator import os import re import warnings import unicodedata import collections from collections import UserDict, defaultdict from contextlib import contextmanager from enum import Enum from typing import Any, Dict, List, MutableMapping, NamedTuple, Optional, Sequence, Tuple, Union import numpy as np from tokenizers import AddedToken as AddedTokenFast from tokenizers import Encoding as EncodingFast from tokenizers import BertWordPieceTokenizer from tokenizers.decoders import Decoder as DecoderFast from tokenizers.implementations import BaseTokenizer as BaseTokenizerFast from file_utils import cached_path, hf_bucket_url, is_remote_url, is_tf_available, is_torch_available, torch_required if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch logger = logging.getLogger(__name__) NO_PAD_TOKEN_FOR_BATCH_MSG = ( "No padding token is set for this model, therefore no batch can be made with uneven " "sequences. Set a padding token or adjust the lengths of the sequences building the " "batch so that every sequence is of the same length." ) UNEVEN_SEQUENCES_FOR_BATCH_MSG = ( "The sequences building the batch are not of the same size, no tensor " "can be built. Set `pad_to_max_length=True` to pad the smaller sequences" "up to the larger sequence's length." 
) SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" ADDED_TOKENS_FILE = "added_tokens.json" TOKENIZER_CONFIG_FILE = "tokenizer_config.json" VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER # Define type aliases and NamedTuples TextInput = str PreTokenizedInput = List[str] EncodedInput = List[int] TextInputPair = Tuple[str, str] PreTokenizedInputPair = Tuple[List[str], List[str]] EncodedInputPair = Tuple[List[int], List[int]] class TensorType(Enum): PYTORCH = "pt" TENSORFLOW = "tf" NUMPY = "np" class CharSpan(NamedTuple): """ Character span in the original string Args: start: index of the first character in the original string end: index of the character following the last character in the original string """ start: int end: int class TokenSpan(NamedTuple): """ Token span in an encoded string (list of tokens) Args: start: index of the first token in the span end: index of the token following the last token in the span """ start: int end: int def flatten(x: Sequence): """ Flatten the provided (potentially nested) sequence Args: x (Sequence): Potentially nested sequence to flatten Returns: list: Flattened sequence """ return functools.reduce(operator.iconcat, x, []) @contextmanager def truncate_and_pad( tokenizer: BaseTokenizerFast, max_length: int, stride: int, strategy: str, pad_to_max_length: bool, padding_side: str, pad_token_id: int, pad_token_type_id: int, pad_token: str, ): """ This contextmanager is in charge of defining the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers library) and restore the tokenizer settings afterwards. This contextmanager assumes the provider tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a padding / truncation strategy before, then it will be reset to no padding/truncation when exiting the managed section. 
Args: tokenizer (BaseTokenizerFast): The tokenizer which will be used max_length (int): The maximum size of the sequence stride (int): The stride to use when handling overflow strategy (str): Overflowing logic to use pad_to_max_length (bool): Boolean indicating if the output needs to be padded up to max_length padding_side (str): "left" or "right" indicating the direction the output sequence will be padded pad_token_id (int): The integer representation of the padding token to use pad_token_type_id (int): The integer representation of the padding token type to use pad_token (str): The string representation of the padding token to use """ # Handle all the truncation and padding stuff if max_length is not None: tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy) if pad_to_max_length and (pad_token and pad_token_id >= 0): tokenizer.enable_padding( max_length=max_length, direction=padding_side, pad_id=pad_token_id, pad_type_id=pad_token_type_id, pad_token=pad_token, ) elif pad_to_max_length: logger.warning( "Disabled padding because no padding token set (pad_token: {}, pad_token_id: {}).\n" "To remove this error, you can add a new pad token and then resize model embedding:\n" "\ttokenizer.pad_token = '<PAD>'\n\tmodel.resize_token_embeddings(len(tokenizer))".format( pad_token, pad_token_id ) ) yield # TODO(morgan, anthony): once we have a simple way to serialize tokenizers maybe store and restore the state afterward # to avoid destructing the padding / truncation strategy as we do now. if max_length is not None: tokenizer.no_truncation() if pad_to_max_length and (pad_token and pad_token_id >= 0): tokenizer.no_padding() def convert_to_tensors( batch_outputs: MutableMapping, return_tensors: Union[str, TensorType], prepend_batch_axis: bool = False ) -> MutableMapping: # Convert to TensorType if not isinstance(return_tensors, TensorType): return_tensors = TensorType(return_tensors) # Get a function reference for the correct framework if return_tensors == TensorType.TENSORFLOW and is_tf_available(): as_tensor = tf.constant elif return_tensors == TensorType.PYTORCH and is_torch_available(): as_tensor = torch.tensor elif return_tensors == TensorType.NUMPY: as_tensor = np.asarray else: raise ImportError( "Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format( return_tensors ) ) # Do the tensor conversion in batch for key, value in batch_outputs.items(): try: if prepend_batch_axis: value = [value] tensor = as_tensor(value) # at-least2d if tensor.ndim > 2: tensor = tensor.squeeze(0) elif tensor.ndim < 2: tensor = tensor[None, :] batch_outputs[key] = tensor except ValueError: if None in [item for sequence in value for item in sequence]: raise ValueError(NO_PAD_TOKEN_FOR_BATCH_MSG) else: raise ValueError(UNEVEN_SEQUENCES_FOR_BATCH_MSG) return batch_outputs class BatchEncoding(UserDict): """ BatchEncoding hold the output of the encode and batch_encode methods (tokens, attention_masks, etc). This class is derived from a python Dictionary and can be used as a dictionnary. In addition, this class expose utility methods to map from word/char space to token space. Args: data (:obj:`dict`): Dictionary of lists/arrays returned by the encode/batch_encode methods ('input_ids', 'attention_mask'...) 
encoding (:obj:`EncodingFast`, :obj:`list(EncodingFast)`, `optional`, defaults to :obj:`None`): If the tokenizer is a fast tokenizer which outputs additional informations like mapping from word/char space to token space the `EncodingFast` instance or list of instance (for batches) hold these informations. """ def __init__( self, data: Optional[Dict[str, Any]] = None, encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None, ): super().__init__(data) if isinstance(encoding, EncodingFast): encoding = [encoding] self._encodings = encoding def __getitem__(self, item: Union[int, str]) -> EncodingFast: """ If the key is a string, get the value of the dict associated to `key` ('input_ids', 'attention_mask'...) If the key is an integer, get the EncodingFast for batch item with index `key` """ if isinstance(item, str): return self.data[item] elif self._encodings is not None: return self._encodings[item] else: raise KeyError( "Indexing with integers (to access backend Encoding for a given batch index) " "is not available when using Python based tokenizers" ) def __getattr__(self, item: str): try: return self.data[item] except KeyError: raise AttributeError def keys(self): return self.data.keys() def values(self): return self.data.values() def items(self): return self.data.items() # After this point: # Extended properties and methods only available for fast (Rust-based) tokenizers # provided by HuggingFace tokenizers library. @property def encodings(self) -> Optional[List[EncodingFast]]: """ Return the list all encoding from the tokenization process Returns: List[EncodingFast] or None if input was tokenized through Python (i.e. not fast) tokenizer """ return self._encodings def tokens(self, batch_index: int = 0) -> List[int]: if not self._encodings: raise ValueError("tokens() is not available when using Python based tokenizers") return self._encodings[batch_index].tokens def words(self, batch_index: int = 0) -> List[Optional[int]]: if not self._encodings: raise ValueError("words() is not available when using Python based tokenizers") return self._encodings[batch_index].words def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: """ Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch. Can be called as: - self.token_to_word(token_index) if batch size is 1 - self.token_to_word(batch_index, token_index) if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_token_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence token_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the token in the sequence. Returns: word_index (:obj:`int`): index of the word in the input sequence. 
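# A minimal sketch of BatchEncoding's dict-like behaviour with made-up data:
# string keys work both as items and as attributes (via ``__getattr__``), while
# integer indexing only works when fast-tokenizer encodings are attached.
encoding_example = BatchEncoding({"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1]})
assert encoding_example["input_ids"] == encoding_example.input_ids == [101, 2023, 102]
assert list(encoding_example.keys()) == ["input_ids", "attention_mask"]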
""" if not self._encodings: raise ValueError("token_to_word() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index if batch_index < 0: batch_index = self._batch_size + batch_index if token_index < 0: token_index = self._seq_len + token_index return self._encodings[batch_index].token_to_word(token_index) def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan: """ Get the encoded token span corresponding to a word in the sequence of the batch. Token spans are returned as a TokenSpan NamedTuple with: start: index of the first token end: index of the token following the last token Can be called as: - self.word_to_tokens(word_index) if batch size is 1 - self.word_to_tokens(batch_index, word_index) if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_word_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the word in the sequence word_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the sequence. Returns: token_span (:obj:`TokenSpan`): Span of tokens in the encoded sequence. TokenSpan are NamedTuple with: start: index of the first token end: index of the token following the last token """ if not self._encodings: raise ValueError("word_to_tokens() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index if batch_index < 0: batch_index = self._batch_size + batch_index if word_index < 0: word_index = self._seq_len + word_index return TokenSpan(*(self._encodings[batch_index].word_to_tokens(word_index))) def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan: """ Get the character span corresponding to an encoded token in a sequence of the batch. Character spans are returned as a CharSpan NamedTuple with: start: index of the first character in the original string associated to the token end: index of the character following the last character in the original string associated to the token Can be called as: - self.token_to_chars(token_index) if batch size is 1 - self.token_to_chars(batch_index, token_index) if batch size is greater or equal to 1 Args: batch_or_token_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence token_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the token or tokens in the sequence. Returns: char_span (:obj:`CharSpan`): Span of characters in the original string. 
CharSpan are NamedTuple with: start: index of the first character in the original string end: index of the character following the last character in the original string """ if not self._encodings: raise ValueError("token_to_chars() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index))) def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int: """ Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch. Can be called as: - self.char_to_token(char_index) if batch size is 1 - self.char_to_token(batch_index, char_index) if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence char_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the sequence. Returns: token_index (:obj:`int`): Index of the token. """ if not self._encodings: raise ValueError("char_to_token() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_token(char_index) def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan: """ Get the character span in the original string corresponding to given word in a sequence of the batch. Character spans are returned as a CharSpan NamedTuple with: start: index of the first character in the original string end: index of the character following the last character in the original string Can be called as: - self.word_to_chars(word_index) if batch size is 1 - self.word_to_chars(batch_index, word_index) if batch size is greater or equal to 1 Args: batch_or_word_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence word_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the sequence. Returns: char_span (:obj:`CharSpan` or :obj:`List[CharSpan]`): Span(s) of the associated character or characters in the string. CharSpan are NamedTuple with: start: index of the first character associated to the token in the original string end: index of the character following the last character associated to the token in the original string """ if not self._encodings: raise ValueError("word_to_chars() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index))) def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int: """ Get the word in the original string corresponding to a character in the original string of a sequence of the batch. 
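# A sketch of the word/token/character mapping helpers above, which require the
# encodings produced by a fast (Rust-backed) tokenizer such as BertTokenizerFast
# from the transformers package; it is left commented out because it downloads
# pretrained vocabulary files.
#
#   from transformers import BertTokenizerFast
#   fast_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = fast_tokenizer.encode_plus("hello world")
#   span = enc.word_to_tokens(0)             # TokenSpan over the tokens of word 0 ("hello")
#   enc.token_to_chars(span.start)           # CharSpan of that token in the original string
#   enc.char_to_token(6)                     # index of the token covering character 6 ("world")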
Can be called as: - self.char_to_word(char_index) if batch size is 1 - self.char_to_word(batch_index, char_index) if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (:obj:`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the character in the orginal string. char_index (:obj:`int`, `optional`): If a batch index is provided in `batch_or_token_index`, this can be the index of the character in the orginal string. Returns: token_index (:obj:`int` or :obj:`List[int]`): Index or indices of the associated encoded token(s). """ if not self._encodings: raise ValueError("char_to_word() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_word(char_index) @torch_required def to(self, device: str): """Send all values to device by calling v.to(device)""" self.data = {k: v.to(device) for k, v in self.data.items()} return self class SpecialTokensMixin: """ SpecialTokensMixin is derived by ``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` and handles specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access to these special tokens in a model-independant manner and allow to set and update the special tokens. """ SPECIAL_TOKENS_ATTRIBUTES = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] def __init__(self, **kwargs): self._bos_token = None self._eos_token = None self._unk_token = None self._sep_token = None self._pad_token = None self._cls_token = None self._mask_token = None self._pad_token_type_id = 0 self._additional_special_tokens = [] for key, value in kwargs.items(): if key in self.SPECIAL_TOKENS_ATTRIBUTES: if key == "additional_special_tokens": assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value) setattr(self, key, value) elif isinstance(value, AddedTokenFast): setattr(self, key, str(value)) elif isinstance(value, str): setattr(self, key, value) else: raise TypeError( "special token {} has to be either str or AddedTokenFast but got: {}".format(key, type(value)) ) @property def bos_token(self): """ Beginning of sentence token (string). Log an error if used while not having been set. """ if self._bos_token is None: logger.error("Using bos_token, but it is not set yet.") return self._bos_token @property def eos_token(self): """ End of sentence token (string). Log an error if used while not having been set. """ if self._eos_token is None: logger.error("Using eos_token, but it is not set yet.") return self._eos_token @property def unk_token(self): """ Unknown token (string). Log an error if used while not having been set. """ if self._unk_token is None: logger.error("Using unk_token, but it is not set yet.") return self._unk_token @property def sep_token(self): """ Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """ if self._sep_token is None: logger.error("Using sep_token, but it is not set yet.") return self._sep_token @property def pad_token(self): """ Padding token (string). 
Log an error if used while not having been set. """ if self._pad_token is None: logger.error("Using pad_token, but it is not set yet.") return self._pad_token @property def cls_token(self): """ Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """ if self._cls_token is None: logger.error("Using cls_token, but it is not set yet.") return self._cls_token @property def mask_token(self): """ Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """ if self._mask_token is None: logger.error("Using mask_token, but it is not set yet.") return self._mask_token @property def additional_special_tokens(self): """ All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """ if self._additional_special_tokens is None: logger.error("Using additional_special_tokens, but it is not set yet.") return self._additional_special_tokens def _maybe_update_backend(self, value): """ To be overriden by derived class if a backend tokenizer has to be updated. """ pass @bos_token.setter def bos_token(self, value): self._bos_token = value self._maybe_update_backend([value]) @eos_token.setter def eos_token(self, value): self._eos_token = value self._maybe_update_backend([value]) @unk_token.setter def unk_token(self, value): self._unk_token = value self._maybe_update_backend([value]) @sep_token.setter def sep_token(self, value): self._sep_token = value self._maybe_update_backend([value]) @pad_token.setter def pad_token(self, value): self._pad_token = value self._maybe_update_backend([value]) @cls_token.setter def cls_token(self, value): self._cls_token = value self._maybe_update_backend([value]) @mask_token.setter def mask_token(self, value): self._mask_token = value self._maybe_update_backend([value]) @additional_special_tokens.setter def additional_special_tokens(self, value): self._additional_special_tokens = value self._maybe_update_backend(value) @property def bos_token_id(self): """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.bos_token) @property def eos_token_id(self): """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.eos_token) @property def unk_token_id(self): """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.unk_token) @property def sep_token_id(self): """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.sep_token) @property def pad_token_id(self): """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.pad_token) @property def pad_token_type_id(self): """ Id of the padding token type in the vocabulary.""" return self._pad_token_type_id @property def cls_token_id(self): """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. 
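# A minimal sketch of SpecialTokensMixin on its own: unset special tokens are
# simply absent from ``special_tokens_map`` (and the getters log an error and
# return None), while the setters just store the string.  The "<s>" value is an
# arbitrary example.
mixin_example = SpecialTokensMixin()
assert mixin_example.special_tokens_map == {}   # nothing configured yet
assert mixin_example.pad_token_type_id == 0     # token type id has a default even without a pad token
mixin_example.bos_token = "<s>"
assert mixin_example.special_tokens_map == {"bos_token": "<s>"}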
""" return self.convert_tokens_to_ids(self.cls_token) @property def mask_token_id(self): """ Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.mask_token) @property def additional_special_tokens_ids(self): """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.additional_special_tokens) @property def special_tokens_map(self): """ A dictionary mapping special token class attribute (cls_token, unk_token...) to their values ('<unk>', '<cls>'...) """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, "_" + attr) if attr_value: set_attr[attr] = attr_value return set_attr @property def all_special_tokens(self): """ List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes (cls_token, unk_token...). """ all_toks = [] set_attr = self.special_tokens_map for attr_value in set_attr.values(): all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]) all_toks = list(set(all_toks)) return all_toks @property def all_special_ids(self): """ List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to class attributes (cls_token, unk_token...). """ all_toks = self.all_special_tokens all_ids = self.convert_tokens_to_ids(all_toks) return all_ids class PreTrainedTokenizer(SpecialTokensMixin): """ Base class for all tokenizers. Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary. This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). Class attributes (overridden by derived classes): - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string). - ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file. - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size. - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, a dictionnary of specific arguments to pass to the ``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the ``from_pretrained()`` method. Args: - ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model. When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated model in ``max_model_input_sizes`` (see above). If no value is provided, will default to VERY_LARGE_INTEGER (`int(1e30)`). 
no associated max_length can be found in ``max_model_input_sizes``. - ``padding_side``: (`Optional`) string: the side on which the model should have padding applied. Should be selected between ['right', 'left'] - ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the model ("token_type_ids", "attention_mask"...). - ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id`` - ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id`` - ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id`` - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id`` - ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id`` - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id`` - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id`` - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids`` """ vocab_files_names: Dict[str, str] = {} pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {} pretrained_init_configuration: Dict[str, Dict[str, Any]] = {} max_model_input_sizes: Dict[str, int] = {} model_input_names: List[str] = ["token_type_ids", "attention_mask"] padding_side: str = "right" @property def vocab_size(self) -> int: """ Size of the base vocabulary (without the added tokens) """ raise NotImplementedError @property def is_fast(self) -> bool: return False @property def max_len(self) -> int: """ Kept here for backward compatibility. Now renamed to `model_max_length` to avoid ambiguity. """ return self.model_max_length @property def max_len_single_sentence(self) -> int: return self.model_max_length - self.num_special_tokens_to_add(pair=False) @property def max_len_sentences_pair(self) -> int: return self.model_max_length - self.num_special_tokens_to_add(pair=True) @max_len_single_sentence.setter def max_len_single_sentence(self, value) -> int: """ For backward compatibility, allow to try to setup 'max_len_single_sentence' """ if value == self.model_max_length - self.num_special_tokens_to_add(pair=False): logger.warning( "Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up." ) else: raise ValueError( "Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up." ) @max_len_sentences_pair.setter def max_len_sentences_pair(self, value) -> int: """ For backward compatibility, allow to try to setup 'max_len_sentences_pair' """ if value == self.model_max_length - self.num_special_tokens_to_add(pair=True): logger.warning( "Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up." 
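# A sketch of the special-token aggregation properties defined above
# (``special_tokens_map``, ``all_special_tokens``, ``all_special_ids``), assuming
# a concrete pretrained tokenizer can be downloaded; commented out for that reason.
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   tokenizer.special_tokens_map   # e.g. {'unk_token': '[UNK]', 'sep_token': '[SEP]',
#                                  #       'pad_token': '[PAD]', 'cls_token': '[CLS]',
#                                  #       'mask_token': '[MASK]'}
#   assert set(tokenizer.all_special_ids) == set(
#       tokenizer.convert_tokens_to_ids(tokenizer.all_special_tokens)
#   )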
) else: raise ValueError( "Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up." ) def get_vocab(self): """ Returns the vocabulary as a dict of {token: index} pairs. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. """ raise NotImplementedError() def __init__(self, model_max_length=None, **kwargs): super().__init__(**kwargs) # For backward compatibility we fallback to set model_max_length from max_len if provided if "max_len" in kwargs: warnings.warn( "Parameter max_len is deprecated and will be removed in a future release. " "Use model_max_length instead.", category=FutureWarning, ) model_max_length = kwargs.pop("max_len") self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER # Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed. self.padding_side = kwargs.pop("padding_side", self.padding_side) assert self.padding_side in [ "right", "left", ], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}" self.model_input_names = kwargs.pop("model_input_names", self.model_input_names) # Added tokens self.added_tokens_encoder = {} self.unique_added_tokens_encoder = set() self.added_tokens_decoder = {} # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``) self.init_inputs = () self.init_kwargs = {} def __len__(self): """ Size of the full vocabulary with the added tokens """ return self.vocab_size + len(self.added_tokens_encoder) @classmethod def from_pretrained(cls, *inputs, **kwargs): r""" Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``. - (not applicable to all derived classes, deprecated) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``. cache_dir: (`optional`) string: Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the vocabulary files and override the cached versions if they exists. resume_download: (`optional`) boolean, default False: Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method. kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. 
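# A sketch of the length-related helpers above, assuming 'bert-base-uncased'
# (registered maximum input size 512, two special tokens added around a single
# sentence); commented out because it downloads pretrained files.
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   assert tokenizer.max_len == tokenizer.model_max_length == 512
#   assert tokenizer.max_len_single_sentence == 512 - 2   # room for [CLS] and [SEP]
#   assert len(tokenizer) == tokenizer.vocab_size + len(tokenizer.added_tokens_encoder)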
Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details. Examples:: # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer # Download vocabulary from S3 and cache. tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from S3 (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased') # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) tokenizer = BertTokenizer.from_pretrained('./test/saved_model/') # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt') # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>') # You should be sure '<unk>' is in the vocabulary when doing that. # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == '<unk>' """ return cls._from_pretrained(*inputs, **kwargs) @classmethod def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs): cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) s3_models = list(cls.max_model_input_sizes.keys()) vocab_files = {} init_configuration = {} if pretrained_model_name_or_path in s3_models: # Get the vocabulary from AWS S3 bucket for file_id, map_list in cls.pretrained_vocab_files_map.items(): vocab_files[file_id] = map_list[pretrained_model_name_or_path] if ( cls.pretrained_init_configuration and pretrained_model_name_or_path in cls.pretrained_init_configuration ): init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path].copy() else: # Get the vocabulary from local files logger.info( "Model name '{}' not found in model shortcut name list ({}). " "Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.".format( pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path ) ) if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): if len(cls.vocab_files_names) > 1: raise ValueError( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not supported." "Use a model identifier or the path to a directory instead." 
) logger.warning( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated" ) file_id = list(cls.vocab_files_names.keys())[0] vocab_files[file_id] = pretrained_model_name_or_path else: # At this point pretrained_model_name_or_path is either a directory or a model identifier name additional_files_names = { "added_tokens_file": ADDED_TOKENS_FILE, "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, "tokenizer_config_file": TOKENIZER_CONFIG_FILE, } # Look for the tokenizer main vocabulary files + the additional tokens files for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items(): if os.path.isdir(pretrained_model_name_or_path): full_file_name = os.path.join(pretrained_model_name_or_path, file_name) if not os.path.exists(full_file_name): logger.info("Didn't find file {}. We won't load it.".format(full_file_name)) full_file_name = None else: full_file_name = hf_bucket_url( pretrained_model_name_or_path, filename=file_name, use_cdn=False ) vocab_files[file_id] = full_file_name # Get files from url, cache, or disk depending on the case try: resolved_vocab_files = {} for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None else: resolved_vocab_files[file_id] = cached_path( file_path, cache_dir=cache_dir ) except EnvironmentError: if pretrained_model_name_or_path in s3_models: msg = "Couldn't reach server at '{}' to download vocabulary files." else: msg = ( "Model name '{}' was not found in tokenizers model name list ({}). " "We assumed '{}' was a path or url to a directory containing vocabulary files " "named {}, but couldn't find such vocabulary files at this path or url.".format( pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values()), ) ) raise EnvironmentError(msg) if all(full_file_name is None for full_file_name in resolved_vocab_files.values()): raise EnvironmentError( "Model name '{}' was not found in tokenizers model name list ({}). " "We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files " "named {} but couldn't find such vocabulary files at this path or url.".format( pretrained_model_name_or_path, ", ".join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values()), ) ) for file_id, file_path in vocab_files.items(): if file_path == resolved_vocab_files[file_id]: logger.info("loading file {}".format(file_path)) else: logger.info("loading file {} from cache at {}".format(file_path, resolved_vocab_files[file_id])) # Prepare tokenizer initialization kwargs # Did we saved some inputs and kwargs to reload ? 
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None) if tokenizer_config_file is not None: with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle: init_kwargs = json.load(tokenizer_config_handle) saved_init_inputs = init_kwargs.pop("init_inputs", ()) if not init_inputs: init_inputs = saved_init_inputs else: init_kwargs = init_configuration # Update with newly provided kwargs init_kwargs.update(kwargs) # Set max length if needed if pretrained_model_name_or_path in cls.max_model_input_sizes: # if we're using a pretrained model, ensure the tokenizer # wont index sequences longer than the number of positional embeddings model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path] if model_max_length is not None and isinstance(model_max_length, (int, float)): init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length) # Merge resolved_vocab_files arguments in init_kwargs. added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None) special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None) for args_name, file_path in resolved_vocab_files.items(): if args_name not in init_kwargs: init_kwargs[args_name] = file_path if special_tokens_map_file is not None: with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle: special_tokens_map = json.load(special_tokens_map_handle) for key, value in special_tokens_map.items(): if key not in init_kwargs: init_kwargs[key] = value # Instantiate tokenizer. try: tokenizer = cls(*init_inputs, **init_kwargs) except OSError: raise OSError( "Unable to load vocabulary from file. " "Please check that the provided vocabulary is accessible and not corrupted." ) # Save inputs and kwargs for saving and re-loading with ``save_pretrained`` tokenizer.init_inputs = init_inputs tokenizer.init_kwargs = init_kwargs # update unique_added_tokens_encoder with special tokens for correct tokenization tokenizer.unique_added_tokens_encoder.update(set(tokenizer.all_special_tokens)) # Add supplementary tokens. if added_tokens_file is not None: with open(added_tokens_file, encoding="utf-8") as added_tokens_handle: added_tok_encoder = json.load(added_tokens_handle) added_tok_decoder = {v: k for k, v in added_tok_encoder.items()} tokenizer.added_tokens_encoder.update(added_tok_encoder) tokenizer.added_tokens_decoder.update(added_tok_decoder) tokenizer.unique_added_tokens_encoder.update(set(tokenizer.added_tokens_encoder.keys())) return tokenizer def save_pretrained(self, save_directory): """ Save the tokenizer vocabulary files together with: - added tokens, - special-tokens-to-class-attributes-mapping, - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert). Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (e.g. modifying tokenizer.do_lower_case after creation). This method make sure the full tokenizer can then be re-loaded using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method. 
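# Two small sketches for the logic above.  First, the model_max_length capping:
# the names and the 512 limit are hypothetical stand-ins for a registered model
# shortcut.  Second, a save/reload round trip as described by ``save_pretrained``
# above, assuming BertTokenizer is available; it is commented out because it
# downloads pretrained files and ``save_pretrained`` expects an existing directory.
hypothetical_init_kwargs = {}        # no explicit model_max_length provided by the user
hypothetical_registered_limit = 512  # value that would come from max_model_input_sizes
capped_max_length = min(hypothetical_init_kwargs.get("model_max_length", int(1e30)), hypothetical_registered_limit)
assert capped_max_length == 512
#
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   os.makedirs('./saved_tokenizer', exist_ok=True)
#   tokenizer.save_pretrained('./saved_tokenizer')
#   reloaded = BertTokenizer.from_pretrained('./saved_tokenizer')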
""" if not os.path.isdir(save_directory): logger.error("Saving directory ({}) should be a directory".format(save_directory)) return special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE) added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE) tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE) tokenizer_config = copy.deepcopy(self.init_kwargs) if len(self.init_inputs) > 0: tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs) for file_id in self.vocab_files_names.keys(): tokenizer_config.pop(file_id, None) with open(tokenizer_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tokenizer_config, ensure_ascii=False)) with open(special_tokens_map_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.special_tokens_map, ensure_ascii=False)) if len(self.added_tokens_encoder) > 0: with open(added_tokens_file, "w", encoding="utf-8") as f: out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False) f.write(out_str) vocab_files = self.save_vocabulary(save_directory) return vocab_files + (special_tokens_map_file, added_tokens_file) def save_vocabulary(self, save_directory) -> Tuple[str]: """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens and special token mappings. Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method. """ raise NotImplementedError def add_tokens(self, new_tokens: Union[str, List[str]]) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. Args: new_tokens: string or list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2']) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. 
""" if not new_tokens: return 0 if not isinstance(new_tokens, list): new_tokens = [new_tokens] tokens_to_add = [] for token in new_tokens: assert isinstance(token, str) if self.init_kwargs.get("do_lower_case", False) and token not in self.all_special_tokens: token = token.lower() if ( token != self.unk_token and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token) and token not in tokens_to_add ): tokens_to_add.append(token) logger.info("Adding %s to the vocabulary", token) added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add)) added_tok_decoder = {v: k for k, v in added_tok_encoder.items()} self.added_tokens_encoder.update(added_tok_encoder) self.unique_added_tokens_encoder = set(self.added_tokens_encoder.keys()).union(set(self.all_special_tokens)) self.added_tokens_decoder.update(added_tok_decoder) return len(tokens_to_add) def num_special_tokens_to_add(self, pair=False): """ Returns the number of added tokens when encoding a sequence with special tokens. Note: This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. Args: pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the number of added tokens in the case of a single sequence if set to False. Returns: Number of tokens added to sequences """ token_ids_0 = [] token_ids_1 = [] return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) def add_special_tokens(self, special_tokens_dict): """ Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). Using `add_special_tokens` will ensure your special tokens can be used in several ways: - special tokens are carefully handled by the tokenizer (they are never split) - you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>') Args: special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``]. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') special_tokens_dict = {'cls_token': '<CLS>'} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. 
assert tokenizer.cls_token == '<CLS>' """ if not special_tokens_dict: return 0 added_tokens = 0 for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES if key == "additional_special_tokens": assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value) added_tokens += self.add_tokens(value) else: assert isinstance(value, str) added_tokens += self.add_tokens([value]) logger.info("Assigning %s to the %s key of the tokenizer", value, key) setattr(self, key, value) return added_tokens def tokenize(self, text: TextInput, **kwargs): """ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Take care of added tokens. Args: text (:obj:`string`): The sequence to be encoded. **kwargs (:obj: `dict`): Arguments passed to the model-specific `prepare_for_tokenization` preprocessing method. """ all_special_tokens = self.all_special_tokens text = self.prepare_for_tokenization(text, **kwargs) # TODO: should this be in the base class? def lowercase_text(t): # convert non-special tokens to lowercase escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens] pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)" return re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t) if self.init_kwargs.get("do_lower_case", False): text = lowercase_text(text) def split_on_token(tok, text): result = [] split_text = text.split(tok) for i, sub_text in enumerate(split_text): sub_text = sub_text.rstrip() if i == 0 and not sub_text: result += [tok] elif i == len(split_text) - 1: if sub_text: result += [sub_text] else: pass else: if sub_text: result += [sub_text] result += [tok] return result def split_on_tokens(tok_list, text): if not text.strip(): return [] if not tok_list: return self._tokenize(text) tokenized_text = [] text_list = [text] for tok in tok_list: tokenized_text = [] for sub_text in text_list: if sub_text not in self.unique_added_tokens_encoder: tokenized_text += split_on_token(tok, sub_text) else: tokenized_text += [sub_text] text_list = tokenized_text return list( itertools.chain.from_iterable( ( self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token] for token in tokenized_text ) ) ) added_tokens = self.unique_added_tokens_encoder tokenized_text = split_on_tokens(added_tokens, text) return tokenized_text def _tokenize(self, text, **kwargs): """ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Do NOT take care of added tokens. """ raise NotImplementedError def convert_tokens_to_ids(self, tokens): """ Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary. 
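# A sketch showing that tokens registered through ``add_special_tokens`` or
# ``add_tokens`` are never split by ``tokenize`` above; assumes GPT2Tokenizer
# can be downloaded, so it is left commented out.
#
#   tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
#   tokenizer.add_special_tokens({'cls_token': '<CLS>'})
#   tokens = tokenizer.tokenize("hello <CLS> world")
#   assert '<CLS>' in tokens            # kept as a single piece; the rest goes through _tokenize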
""" if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self.added_tokens_encoder: return self.added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def encode( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, max_length: Optional[int] = None, stride: int = 0, truncation_strategy: str = "longest_first", pad_to_max_length: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs ): """ Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary. Adds the model-specific special tokens (such as beginning of sequence, end of sequence, sequence separator). If specifying ``add_special_tokens=False``, same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``. Args: text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to ``True``, the sequences will be encoded with the special tokens relative to their model. max_length (:obj:`int`, `optional`, defaults to :obj:`None`): If set to a number, will limit the total sequence returned so that it has a maximum length. If there are overflowing tokens, those will be added to the returned dictionary. You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`. stride (:obj:`int`, `optional`, defaults to ``0``): If set to a number along with max_length, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`): String selected in the following options: - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length starting from the longest one at each token (when there is a pair of input sequences) - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`): If set to True, the returned sequences will be padded according to the model's padding side and padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length. 
The tokenizer padding sides are handled by the class attribute `padding_side` which can be set to the following strings: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences Defaults to False: no padding. return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`): Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant` or PyTorch :obj:`torch.Tensor` instead of a list of python integers. **kwargs: passed to the `self.tokenize()` method """ encoded_inputs = self.encode_plus( text, text_pair=text_pair, max_length=max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, pad_to_max_length=pad_to_max_length, return_tensors=return_tensors, **kwargs, ) return encoded_inputs["input_ids"] def encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, max_length: Optional[int] = None, stride: int = 0, truncation_strategy: str = "longest_first", pad_to_max_length: bool = False, is_pretokenized: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, **kwargs ) -> BatchEncoding: """ Returns a dictionary containing the encoded sequence or sequence pair and additional information: the mask for sequence classification and the overflowing elements if a ``max_length`` is specified. Args: text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the later only for not-fast tokenizers)): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to ``True``, the sequences will be encoded with the special tokens relative to their model. max_length (:obj:`int`, `optional`, defaults to :obj:`None`): If set to a number, will limit the total sequence returned so that it has a maximum length. If there are overflowing tokens, those will be added to the returned dictionary You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`. stride (:obj:`int`, `optional`, defaults to ``0``): If set to a number along with max_length, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. 
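# A sketch of the equivalence stated in the ``encode`` docstring above, assuming
# ``tokenizer`` is any concrete pretrained tokenizer instance; commented out
# because one would have to be downloaded first.
#
#   ids = tokenizer.encode("hello world", add_special_tokens=False)
#   assert ids == tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))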
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`): String selected in the following options: - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length starting from the longest one at each token (when there is a pair of input sequences) - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`): If set to True, the returned sequences will be padded according to the model's padding side and padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length. The tokenizer padding sides are handled by the class attribute `padding_side` which can be set to the following strings: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences Defaults to False: no padding. is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Set to True to indicate the input is already tokenized return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`): Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant` or PyTorch :obj:`torch.Tensor` instead of a list of python integers. return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are token type IDs? <../glossary.html#token-type-ids>`_ return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`none`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are attention masks? <../glossary.html#attention-mask>`__ return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return overflowing token information (default False). return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return special tokens mask information (default False). return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return (char_start, char_end) for each token (default False). If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on fast tokenizers inheriting from PreTrainedTokenizerFast. **kwargs: passed to the `self.tokenize()` method Return: A Dictionary of shape:: { input_ids: list[int], token_type_ids: list[int] if return_token_type_ids is True (default) attention_mask: list[int] if return_attention_mask is True (default) overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True } With the fields: - ``input_ids``: list of token ids to be fed to a model - ``token_type_ids``: list of token type ids to be fed to a model - ``attention_mask``: list of indices specifying which tokens should be attended to by the model - ``overflowing_tokens``: list of overflowing tokens if a max length is specified. 
- ``num_truncated_tokens``: number of overflowing tokens a ``max_length`` is specified - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added tokens and 1 specifying sequence tokens. """ def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers." "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) # Throw an error if we can pad because there is no padding token if pad_to_max_length and self.pad_token_id is None: raise ValueError( "Unable to set proper padding strategy as the tokenizer does not have a padding token. " "In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` " "or add a new pad token via the function add_special_tokens if you want to use a padding strategy" ) first_ids = get_input_ids(text) second_ids = get_input_ids(text_pair) if text_pair is not None else None return self.prepare_for_model( first_ids, pair_ids=second_ids, max_length=max_length, pad_to_max_length=pad_to_max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, return_tensors=return_tensors, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, prepend_batch_axis=return_tensors is not None, ) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, max_length: Optional[int] = None, stride: int = 0, truncation_strategy: str = "longest_first", pad_to_max_length: bool = False, is_pretokenized: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_masks: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_masks: bool = False, return_offsets_mapping: bool = False, return_lengths: bool = False, **kwargs ) -> BatchEncoding: """ Returns a dictionary containing the encoded sequence or sequence pair and additional information: the mask for sequence classification and the overflowing elements if a ``max_length`` is specified. Args: batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`, :obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also: :obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`): Batch of sequences or pair of sequences to be encoded. 
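# A sketch of a typical ``encode_plus`` call with padding, assuming a BERT-style
# tokenizer with a pad token; the exact keys returned depend on the tokenizer's
# ``model_input_names``.  Commented out because it needs pretrained files.
#
#   out = tokenizer.encode_plus("hello", "world", max_length=8, pad_to_max_length=True)
#   sorted(out.keys())                  # typically ['attention_mask', 'input_ids', 'token_type_ids']
#   assert len(out['input_ids']) == 8   # padded up to max_length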
This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in encode_plus) add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to ``True``, the sequences will be encoded with the special tokens relative to their model. max_length (:obj:`int`, `optional`, defaults to :obj:`None`): If set to a number, will limit the total sequence returned so that it has a maximum length. If there are overflowing tokens, those will be added to the returned dictionary stride (:obj:`int`, `optional`, defaults to ``0``): If set to a number along with max_length, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`): String selected in the following options: - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length starting from the longest one at each token (when there is a pair of input sequences) - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`): If set to True, the returned sequences will be padded according to the model's padding side and padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length. The tokenizer padding sides are handled by the class attribute `padding_side` which can be set to the following strings: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences Defaults to False: no padding. is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Set to True to indicate the input is already tokenized return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`): Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant` or PyTorch :obj:`torch.Tensor` instead of a list of python integers. return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are token type IDs? <../glossary.html#token-type-ids>`_ return_attention_masks (:obj:`bool`, `optional`, defaults to :obj:`none`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are attention masks? <../glossary.html#attention-mask>`__ return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return overflowing token information (default False). return_special_tokens_masks (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return special tokens mask information (default False). return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return (char_start, char_end) for each token (default False). If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on Rust-based tokenizers inheriting from PreTrainedTokenizerFast. 
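                Example (an illustrative usage sketch, assuming a checkpoint such as
                ``bert-base-uncased`` whose vocabulary files can be loaded)::

                    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                    batch = tokenizer.batch_encode_plus(
                        [("Hello world", "How are you?"), "A single sentence without a pair"],
                        add_special_tokens=True,
                        max_length=16,
                        pad_to_max_length=True,
                        return_tensors='pt',
                    )
                    # batch['input_ids'] is a (2, 16) tensor padded with the [PAD] id and
                    # batch['attention_mask'] marks real tokens with 1 and padding with 0.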
return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`): If set the resulting dictionary will include the length of each encoded inputs **kwargs: passed to the `self.tokenize()` method Return: A Dictionary of shape:: { input_ids: list[List[int]], token_type_ids: list[List[int]] if return_token_type_ids is True (default) attention_mask: list[List[int]] if return_attention_mask is True (default) overflowing_tokens: list[List[int]] if a ``max_length`` is specified and return_overflowing_tokens is True num_truncated_tokens: List[int] if a ``max_length`` is specified and return_overflowing_tokens is True special_tokens_mask: list[List[int]] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True } With the fields: - ``input_ids``: list of token ids to be fed to a model - ``token_type_ids``: list of token type ids to be fed to a model - ``attention_mask``: list of indices specifying which tokens should be attended to by the model - ``overflowing_tokens``: list of overflowing tokens if a max length is specified. - ``num_truncated_tokens``: number of overflowing tokens a ``max_length`` is specified - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added tokens and 1 specifying sequence tokens. """ def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, add_special_tokens=add_special_tokens, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) # Throw an error if we can pad because there is no padding token if pad_to_max_length and self.pad_token_id is None: raise ValueError( "Unable to set proper padding strategy as the tokenizer does not have a padding token. In this case please set the `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via the function add_special_tokens if you want to use a padding strategy" ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers." "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) input_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if isinstance(ids_or_pair_ids, (list, tuple)) and len(ids_or_pair_ids) == 2 and not is_pretokenized: ids, pair_ids = ids_or_pair_ids else: ids, pair_ids = ids_or_pair_ids, None first_ids = get_input_ids(ids) second_ids = get_input_ids(pair_ids) if pair_ids is not None else None input_ids.append((first_ids, second_ids)) if max_length is None and pad_to_max_length: def total_sequence_length(input_pairs): first_ids, second_ids = input_pairs return len(first_ids) + ( self.num_special_tokens_to_add() if second_ids is None else (len(second_ids) + self.num_special_tokens_to_add(pair=True)) ) max_length = max([total_sequence_length(ids) for ids in input_ids]) batch_outputs = {} for first_ids, second_ids in input_ids: # Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by # the model. 
It adds special tokens, truncates sequences if overflowing while taking into account # the special tokens and manages a window stride for overflowing tokens outputs = self.prepare_for_model( first_ids, pair_ids=second_ids, max_length=max_length, pad_to_max_length=pad_to_max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, return_attention_mask=return_attention_masks, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_masks, return_lengths=return_lengths, return_tensors=None, # We will convert the whole batch to tensors at the end ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) if return_tensors is not None: convert_to_tensors(batch_outputs, return_tensors) return BatchEncoding(batch_outputs) def prepare_for_model( self, ids: List[int], pair_ids: Optional[List[int]] = None, max_length: Optional[int] = None, add_special_tokens: bool = True, stride: int = 0, truncation_strategy: str = "longest_first", pad_to_max_length: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_lengths: bool = False, prepend_batch_axis: bool = False, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: ids: list of tokenized input ids. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. max_length: maximum length of the returned list. Will truncate by taking into account the special tokens. add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative to their model. stride: window stride for overflowing tokens. Can be useful to remove edge effect when using sequential list of inputs. The overflowing token will contains a part of the previous window of tokens. truncation_strategy: string selected in the following options: - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length starting from the longest one at each token (when there is a pair of input sequences) - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length. The tokenizer padding sides are handled by the following strings: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences Defaults to False: no padding. return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant or PyTorch torch.Tensor instead of a list of python integers. 
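                Example (an illustrative sketch; ``tokenizer`` stands for any already
                instantiated tokenizer and the sentence is arbitrary)::

                    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("a fairly long sentence that will not fit"))
                    encoded = tokenizer.prepare_for_model(
                        ids,
                        max_length=8,
                        truncation_strategy='longest_first',
                        return_overflowing_tokens=True,
                    )
                    # encoded['input_ids'] keeps at most 8 ids including the added special tokens;
                    # the ids cut from the end come back under 'overflowing_tokens' and their
                    # count under 'num_truncated_tokens'.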
return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default: set to model specifics). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False). return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False). return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`): If set the resulting dictionary will include the length of each encoded inputs prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`): If set the resulting object will feature an extra dim at position 0. This can be seen as an unsqueezing operator. Return: A Dictionary of shape:: { input_ids: list[int], token_type_ids: list[int] if return_token_type_ids is True (default) overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True length: int if return_lengths is True } With the fields: - ``input_ids``: list of token ids to be fed to a model - ``token_type_ids``: list of token type ids to be fed to a model - ``overflowing_tokens``: list of overflowing tokens if a max length is specified. - ``num_truncated_tokens``: number of overflowing tokens a ``max_length`` is specified - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added tokens and 1 specifying sequence tokens. - ``length``: this is the length of ``input_ids`` """ pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} # Truncation: Handle max sequence length total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) if max_length and total_len > max_length: ids, pair_ids, overflowing_tokens = self.truncate_sequences( ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else []) # Build output dictionnary encoded_inputs["input_ids"] = sequence if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) # Check lengths assert max_length is None or len(encoded_inputs["input_ids"]) <= max_length if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length: logger.warning( "Token indices sequence length is longer than the specified 
maximum sequence length " "for this model ({} > {}). Running this sequence through the model will result in " "indexing errors".format(len(ids), self.model_max_length) ) # Padding needs_to_be_padded = pad_to_max_length and ( max_length and len(encoded_inputs["input_ids"]) < max_length or max_length is None and len(encoded_inputs["input_ids"]) < self.model_max_length and self.model_max_length <= LARGE_INTEGER ) if pad_to_max_length and max_length is None and self.model_max_length > LARGE_INTEGER: logger.warning( "Sequence can't be padded as no maximum length is specified and the model maximum length is too high." ) if needs_to_be_padded: difference = (max_length if max_length is not None else self.model_max_length) - len( encoded_inputs["input_ids"] ) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference if return_token_type_ids: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if return_special_tokens_mask: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"]) if return_token_type_ids: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if return_special_tokens_mask: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) else: if return_attention_mask: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) if return_lengths: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) # Prepare model inputs as tensors if asked if return_tensors is not None: convert_to_tensors(encoded_inputs, return_tensors, prepend_batch_axis) return BatchEncoding(encoded_inputs) def prepare_for_tokenization(self, text: str, **kwargs) -> str: """ Performs any necessary transformations before tokenization """ return text def truncate_sequences( self, ids: List[int], pair_ids: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: str = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in place to the maximum length. Args: ids: list of tokenized input ids. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``): number of tokens to remove using the truncation strategy truncation_strategy: string selected in the following options: - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length starting from the longest one at each token (when there is a pair of input sequences). Overflowing tokens only contains overflow from the first sequence. - 'only_first': Only truncate the first sequence. raise an error if the first sequence is shorter or equal to than num_tokens_to_remove. 
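                Example (an illustrative sketch of the 'longest_first' strategy described above;
                the ids are arbitrary integers)::

                    ids, pair_ids, overflowing = tokenizer.truncate_sequences(
                        [1, 2, 3, 4, 5], pair_ids=[6, 7],
                        num_tokens_to_remove=2, truncation_strategy='longest_first',
                    )
                    # ids == [1, 2, 3], pair_ids == [6, 7], overflowing == [4, 5]:
                    # tokens are removed one at a time from whichever sequence is currently longer.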
- 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) stride (:obj:`int`, `optional`, defaults to ``0``): If set to a number along with max_length, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. """ if num_tokens_to_remove <= 0: return ids, pair_ids, [] if truncation_strategy == "longest_first": overflowing_tokens = [] for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): overflowing_tokens = [ids[-1]] + overflowing_tokens ids = ids[:-1] else: pair_ids = pair_ids[:-1] window_len = min(len(ids), stride) if window_len > 0: overflowing_tokens = ids[-window_len:] + overflowing_tokens elif truncation_strategy == "only_first": assert len(ids) > num_tokens_to_remove window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] ids = ids[:-num_tokens_to_remove] elif truncation_strategy == "only_second": assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] elif truncation_strategy == "do_not_truncate": raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.") else: raise ValueError( "Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']" ) return (ids, pair_ids, overflowing_tokens) def create_token_type_ids_from_sequences(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List[int]: if token_ids_1 is None: return len(token_ids_0) * [0] return [0] * len(token_ids_0) + [1] * len(token_ids_1) def build_inputs_with_special_tokens(self, token_ids_0: List, token_ids_1: Optional[List] = None) -> List: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. This implementation does not add special tokens. """ if token_ids_1 is None: return token_ids_0 return token_ids_0 + token_ids_1 def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)) def convert_ids_to_tokens( self, ids: Union[int, List[int]], skip_special_tokens: bool = False ) -> Union[int, List[int]]: """ Converts a single index or a sequence of indices (integers) in a token " (resp.) a sequence of tokens (str), using the vocabulary and added tokens. Args: skip_special_tokens: Don't decode special tokens (self.all_special_tokens). 
Default: False """ if isinstance(ids, int): if ids in self.added_tokens_decoder: return self.added_tokens_decoder[ids] else: return self._convert_id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue if index in self.added_tokens_decoder: tokens.append(self.added_tokens_decoder[index]) else: tokens.append(self._convert_id_to_token(index)) return tokens def _convert_id_to_token(self, index: int) -> str: raise NotImplementedError def convert_tokens_to_string(self, tokens: List[str]) -> str: """ Converts a sequence of tokens (string) in a single string. The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids)) but we often want to remove sub-word tokenization artifacts at the same time. """ return " ".join(self.convert_ids_to_tokens(tokens)) def decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True ) -> str: """ Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``. Args: token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods. skip_special_tokens: if set to True, will replace special tokens. clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces. """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separatly for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) text = " ".join(sub_texts) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def batch_decode(self, sequences: List[List[int]], **kwargs) -> List[str]: return [self.decode(seq, **kwargs) for seq in sequences] @staticmethod def clean_up_tokenization(out_string: str) -> str: """ Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms. """ out_string = ( out_string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") ) return out_string class PreTrainedTokenizerFast(PreTrainedTokenizer): """ Base class for all fast tokenizers (wrapping HuggingFace tokenizers library). Inherit from PreTrainedTokenizer. Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary. This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). 
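    Example (an illustrative usage sketch, assuming the matching fast checkpoint files are
    available)::

        tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
        enc = tokenizer.encode_plus("Hello world", return_offsets_mapping=True)
        # enc['offset_mapping'] holds a (char_start, char_end) pair for every produced token,
        # a feature only these Rust-backed tokenizers provide.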
Class attributes (overridden by derived classes): - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string). - ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file. - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size. - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, a dictionnary of specific arguments to pass to the ``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the ``from_pretrained()`` method. Args: - ``tokenizer`` (`BaseTokenizerFast`): A Fast tokenizer from the HuggingFace tokenizer library (in low level Rust language) - ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model. When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated model in ``max_model_input_sizes`` (see above). If no value is provided, will default to VERY_LARGE_INTEGER (`int(1e30)`). no associated max_length can be found in ``max_model_input_sizes``. - ``padding_side``: (`Optional`) string: the side on which the model should have padding applied. Should be selected between ['right', 'left'] - ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the model ("token_type_ids", "attention_mask"...). - ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id`` - ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id`` - ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id`` - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id`` - ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id`` - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id`` - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id`` - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. 
Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids`` """ def __init__(self, tokenizer: BaseTokenizerFast, **kwargs): if not isinstance(tokenizer, BaseTokenizerFast): raise ValueError( "Tokenizer should be an instance of a Tokenizer " "provided by HuggingFace tokenizers library." ) self._tokenizer: BaseTokenizerFast = tokenizer # Initialize all the rest of the kwargs super().__init__(**kwargs) @property def backend_tokenizer(self) -> BaseTokenizerFast: return self._tokenizer @property def decoder(self) -> DecoderFast: return self._tokenizer._tokenizer.decoder @property def is_fast(self) -> bool: return True @property def vocab_size(self) -> int: return self._tokenizer.get_vocab_size(with_added_tokens=False) def __len__(self) -> int: return self._tokenizer.get_vocab_size(with_added_tokens=True) def _maybe_update_backend(self, value): """ Update the backend fast tokenizer. Override method from base class SpecialTokensMixin """ self._tokenizer.add_special_tokens(value) def _convert_encoding( self, encoding: EncodingFast, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, ) -> Dict[str, Any]: """ Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict. Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are lists (overflows) of lists (tokens). If return_tensors is not None, these lists of lists are converted to 2-D tensors for input_ids, token_type_ids and attention_mask. Output shape: (overflows, sequence length) """ if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_overflowing_tokens and encoding.overflowing is not None: encodings = [encoding] + encoding.overflowing else: encodings = [encoding] encoding_dict = defaultdict(list) for e in encodings: encoding_dict["input_ids"].append(e.ids) if return_token_type_ids: encoding_dict["token_type_ids"].append(e.type_ids) if return_attention_mask: encoding_dict["attention_mask"].append(e.attention_mask) if return_special_tokens_mask: encoding_dict["special_tokens_mask"].append(e.special_tokens_mask) if return_offsets_mapping: encoding_dict["offset_mapping"].append(e.offsets) if return_tensors is not None: encoding_dict = convert_to_tensors(encoding_dict, return_tensors) return encoding_dict def _convert_token_to_id_with_added_voc(self, token: int) -> str: index = self._tokenizer.token_to_id(token) if index is None: return self.unk_token_id return index def _convert_id_to_token(self, index: int) -> Optional[str]: return self._tokenizer.id_to_token(int(index)) def get_vocab(self): return self._tokenizer.get_vocab(True) def convert_tokens_to_string(self, tokens: List[int], skip_special_tokens: bool = False) -> str: return self._tokenizer.decode(tokens, skip_special_tokens) def add_tokens(self, new_tokens: List[Union[str, AddedTokenFast]]) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. Args: new_tokens: string or list of string or AddedTokenFast. Each string is a token to add. 
Tokens are only added if they are not already in the vocabulary. AddedTokenFast wrap a string token to let you personnalize it's behavior (Whether this token should only match against single word, whether this token should strip all potential whitespaces on the left side, Whether this token should strip all potential whitespaces on the right side...). See details for AddedToken in HuggingFace tokenizers library. Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2']) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. """ if isinstance(new_tokens, str): new_tokens = [new_tokens] return self._tokenizer.add_tokens(new_tokens) def add_special_tokens(self, special_tokens_dict: dict) -> int: # Map special tokens to class attributes (self.pad_token...) super().add_special_tokens(special_tokens_dict) # If the backend tokenizer the only specificities of special tokens are that # - they will never be processed by the model, and # - they will be removed while decoding. # But they are not mapped to special attributes in the backend so we can just # send a list. tokens = [] for token in special_tokens_dict.values(): if isinstance(token, list): tokens += token else: tokens += [token] num_added_tokens = self._tokenizer.add_special_tokens(tokens) return num_added_tokens def num_special_tokens_to_add(self, pair: bool = False) -> int: return self._tokenizer.num_special_tokens_to_add(pair) def tokenize( self, text: TextInput, pair: Optional[TextInput] = None, add_special_tokens: bool = False ) -> List[str]: return self._tokenizer.encode(text, pair, add_special_tokens).tokens def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] ], add_special_tokens: bool = True, max_length: Optional[int] = None, stride: int = 0, truncation_strategy: str = "longest_first", pad_to_max_length: bool = False, is_pretokenized: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_lengths: bool = False, **kwargs ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise ValueError( "batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs)) ) # Needed if we have to return a tensor pad_to_max_length = pad_to_max_length or (return_tensors is not None and len(batch_text_or_text_pairs) > 1) # Throw an error if we can pad because there is no padding token if pad_to_max_length and self.pad_token_id is None: raise ValueError("Unable to set proper padding strategy as the tokenizer does not have a padding token") # Set the truncation and padding strategy and restore the initial configuration with truncate_and_pad( tokenizer=self._tokenizer, max_length=max_length, stride=stride, strategy=truncation_strategy, pad_to_max_length=pad_to_max_length, padding_side=self.padding_side, pad_token_id=self.pad_token_id if self._pad_token is not None 
else None, pad_token_type_id=self.pad_token_type_id, pad_token=self._pad_token, ): # Check for the pretokenized path if is_pretokenized: encodings = [] # Iterate over each sample (we don't know yet if they are pairs or simple input for i, sample in enumerate(batch_text_or_text_pairs): if not isinstance(sample, (list, tuple)): raise TypeError( "batch_encode_plus(..., is_pretokenized=True) requires batch_text_or_text_pairs " "to be either List[List[str]] or List[Tuple[List[str], List[str]]] but sample at " "index {} is of type {}".format(i, type(sample)) ) # Test if we have a pair of sentences by checking the depth of nesting is_pair = bool(len(sample) > 0 and isinstance(sample[0], (list, tuple))) # Take care of the first sequence - we multi-thread over the words encodings_text = EncodingFast.merge( self._tokenizer.encode_batch(sample[0] if is_pair else sample, add_special_tokens=False), growing_offsets=True, ) # Take care of the second sequence if we have a pair if is_pair: encodings_pair = EncodingFast.merge( self._tokenizer.encode_batch([("", s) for s in sample[1]], add_special_tokens=False), growing_offsets=True, ) else: encodings_pair = None # Post-process - truncate/pad and add special tokens encoding = self._tokenizer.post_process(encodings_text, encodings_pair, add_special_tokens) encodings.append(encoding) # Classical path with strings input else: # Avoid thread overhead if only one example. if len(batch_text_or_text_pairs) == 1: if isinstance(batch_text_or_text_pairs[0], (tuple, list)): encodings = self._tokenizer.encode( *batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens ) else: encodings = self._tokenizer.encode( batch_text_or_text_pairs[0], add_special_tokens=add_special_tokens ) encodings = [encodings] else: encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens ) # Convert encoding to dict # `Tokens` has type: List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]] # with nested dimensions corresponding to batch, overflows, sequence length tokens = [ self._convert_encoding( encoding=encoding, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, ) for encoding in encodings ] # Sanitize the output to have dict[list] from list[dict] sanitized = {} for key in tokens[0].keys(): # To List[List[List[int]]] of shape (batch, overflows, sequence length) stack = [e for item in tokens for e in item[key]] if return_tensors == "tf": stack = tf.stack(stack, axis=0) elif return_tensors == "pt": stack = torch.stack(stack, dim=0) # elif not return_tensors and len(stack) == 1: # stack = stack[0] sanitized[key] = stack # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = flatten([[i] * len(enc["input_ids"]) for i, enc in enumerate(tokens)]) sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping return BatchEncoding(sanitized, encodings) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None, add_special_tokens: bool = True, max_length: Optional[int] = None, pad_to_max_length: bool = False, stride: int = 0, truncation_strategy: str = "longest_first", is_pretokenized: bool = False, return_tensors: 
Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, **kwargs ) -> BatchEncoding: # Check for pretokenized path (ie [token1, token2, ..., tokenN] -> [id1, id2, ..., idN] if is_pretokenized: if isinstance(text, list) and len(text) > 0: # Encode through encode_batch with sequence of only one word which will be merged after hand encoding = self._tokenizer.encode_batch(text, add_special_tokens=False) encoding = EncodingFast.merge(encoding, growing_offsets=True) # Let's do the same for pairs if provided if isinstance(text_pair, list): # We prepend empty string before each word so that encoding is aware content is a pair encoding_pair = self._tokenizer.encode_batch( [("", p) for p in text_pair], add_special_tokens=False ) encoding_pair = EncodingFast.merge(encoding_pair, growing_offsets=True) elif text_pair is None: encoding_pair = None else: raise TypeError( "encode_plus(..., is_pretokenized=True) requires text and text_pair to be List[str] " "but got (text={}, text_pair={})".format(type(text), type(text_pair)) ) # Post process and if asked to do so, insert special tokens where needed encoding = self._tokenizer.post_process(encoding, encoding_pair, add_special_tokens) batched_output = BatchEncoding( self._convert_encoding( encoding, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, ), encoding, ) else: raise TypeError( "encode_plus(..., is_pretokenized=True) requires text to be List[str] " "but got (text={}, text_pair={})".format(type(text), type(text_pair)) ) else: batched_input = [(text, text_pair)] if text_pair else [text] batched_output = self.batch_encode_plus( batched_input, add_special_tokens=add_special_tokens, max_length=max_length, stride=stride, truncation_strategy=truncation_strategy, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, pad_to_max_length=pad_to_max_length, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis if not return_tensors: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) return batched_output def decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True ) -> str: text = self._tokenizer.decode(token_ids, skip_special_tokens) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str) -> Tuple[str]: if os.path.isdir(save_directory): files = self._tokenizer.save(save_directory) else: folder, file = os.path.split(os.path.abspath(save_directory)) files = self._tokenizer.save(folder, name=file) return tuple(files) def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) if attention_mask 
is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", "bert-base-german-cased": "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt", "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt", "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt", "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt", "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt", "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt", "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-vocab.txt", "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/vocab.txt", "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/vocab.txt", "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "bert-base-uncased": 512, "bert-large-uncased": 512, "bert-base-cased": 512, "bert-large-cased": 512, "bert-base-multilingual-uncased": 512, "bert-base-multilingual-cased": 512, "bert-base-chinese": 512, "bert-base-german-cased": 512, "bert-large-uncased-whole-word-masking": 512, "bert-large-cased-whole-word-masking": 512, "bert-large-uncased-whole-word-masking-finetuned-squad": 512, "bert-large-cased-whole-word-masking-finetuned-squad": 512, "bert-base-cased-finetuned-mrpc": 512, "bert-base-german-dbmdz-cased": 512, "bert-base-german-dbmdz-uncased": 512, "TurkuNLP/bert-base-finnish-cased-v1": 512, "TurkuNLP/bert-base-finnish-uncased-v1": 512, "wietsedv/bert-base-dutch-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "bert-base-uncased": {"do_lower_case": True}, "bert-large-uncased": {"do_lower_case": True}, "bert-base-cased": {"do_lower_case": False}, "bert-large-cased": {"do_lower_case": False}, "bert-base-multilingual-uncased": 
{"do_lower_case": True}, "bert-base-multilingual-cased": {"do_lower_case": False}, "bert-base-chinese": {"do_lower_case": False}, "bert-base-german-cased": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking": {"do_lower_case": True}, "bert-large-cased-whole-word-masking": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, "bert-base-german-dbmdz-cased": {"do_lower_case": False}, "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(PreTrainedTokenizer): r""" Constructs a BERT tokenizer. Based on WordPiece. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`string`): File containing the vocabulary. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase the input when tokenizing. do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to do basic tokenization before WordPiece. never_split (:obj:`Iterable`, `optional`, defaults to :obj:`None`): Collection of tokens which will never be split during tokenization. Only has an effect when :obj:`do_basic_tokenize=True` unk_token (:obj:`string`, `optional`, defaults to "[UNK]"): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`string`, `optional`, defaults to "[SEP]"): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (:obj:`string`, `optional`, defaults to "[PAD]"): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`string`, `optional`, defaults to "[CLS]"): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`string`, `optional`, defaults to "[MASK]"): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to tokenize Chinese characters. 
This should likely be deactivated for Japanese: see: https://github.com/huggingface/transformers/issues/328 """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs ): super().__init__( unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file) ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. 
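            Example (an illustrative sketch; 7592 and 2088 are arbitrary WordPiece ids, while 101
            and 102 are the conventional [CLS] and [SEP] ids of the standard BERT vocabularies)::

                tokenizer.build_inputs_with_special_tokens([7592, 2088])
                # -> [101, 7592, 2088, 102]        i.e. [CLS] X [SEP]
                tokenizer.build_inputs_with_special_tokens([7592], [2088])
                # -> [101, 7592, 102, 2088, 102]   i.e. [CLS] A [SEP] B [SEP]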
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | if token_ids_1 is None, only returns the first portion of the mask (0's). Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, vocab_path): """ Save the sentencepiece vocabulary (copy original file) and special tokens file to a directory. Args: vocab_path (:obj:`str`): The directory in which to save the vocabulary. Returns: :obj:`Tuple(str)`: Paths to the files saved. """ index = 0 if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"]) else: vocab_file = vocab_path with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!".format(vocab_file) ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True): """ Constructs a BasicTokenizer. Args: **do_lower_case**: Whether to lower case the input. **never_split**: (`optional`) list of str Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. **tokenize_chinese_chars**: (`optional`) boolean (default True) Whether to tokenize Chinese characters. This should likely be deactivated for Japanese: see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328 """ if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: **never_split**: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case and token not in never_split: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
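        # For example, ord("中") == 0x4E2D falls inside the 0x4E00-0x9FFF block tested below, so it
        # is treated as a CJK character and later gets surrounded by spaces, whereas Hiragana
        # ord("あ") == 0x3042 matches none of these ranges and is left untouched.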
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False class BertTokenizerFast(PreTrainedTokenizerFast): r""" Constructs a "Fast" BERT tokenizer (backed by HuggingFace's `tokenizers` library). Bert tokenization is Based on WordPiece. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`string`): File containing the vocabulary. 
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase the input when tokenizing. unk_token (:obj:`string`, `optional`, defaults to "[UNK]"): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`string`, `optional`, defaults to "[SEP]"): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (:obj:`string`, `optional`, defaults to "[PAD]"): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`string`, `optional`, defaults to "[CLS]"): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`string`, `optional`, defaults to "[MASK]"): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to tokenize Chinese characters. This should likely be deactivated for Japanese: see: https://github.com/huggingface/transformers/issues/328 clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to tokenize Chinese characters. This should likely be deactivated for Japanese: see: https://github.com/huggingface/transformers/issues/328 """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", clean_text=True, tokenize_chinese_chars=True, strip_accents=True, wordpieces_prefix="##", **kwargs ): super().__init__( BertWordPieceTokenizer( vocab_file=vocab_file, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, clean_text=clean_text, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, wordpieces_prefix=wordpieces_prefix, ), unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | if token_ids_1 is None, only returns the first portion of the mask (0's). Args: token_ids_0 (:obj:`List[int]`): List of ids. 
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
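The two helpers above define how a single sequence or a sequence pair is wrapped in special tokens and how the 0/1 segment mask is laid out. A minimal, self-contained sketch of that layout follows; it is not the repository's API, and the [CLS]/[SEP] ids (101/102, the usual uncased BERT values) and the example word-piece ids are assumptions chosen for illustration.

# Editor's sketch (not part of the file above): reproduces the [CLS] A [SEP] B [SEP]
# layout and the 0/1 token-type mask built by the methods above.
CLS_ID, SEP_ID = 101, 102  # assumed special-token ids

def pair_inputs(ids_a, ids_b=None):
    if ids_b is None:
        ids = [CLS_ID] + ids_a + [SEP_ID]
        return ids, [0] * len(ids)
    ids = [CLS_ID] + ids_a + [SEP_ID] + ids_b + [SEP_ID]
    type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return ids, type_ids

ids, type_ids = pair_inputs([7592, 2088], [2129, 2024, 2017])  # illustrative word-piece ids
assert ids == [101, 7592, 2088, 102, 2129, 2024, 2017, 102]
assert type_ids == [0, 0, 0, 0, 1, 1, 1, 1]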
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/tokenization_utils.py
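WordpieceTokenizer.tokenize in the file above implements greedy longest-match-first sub-word splitting. The stand-alone sketch below mirrors that loop against a three-entry toy vocabulary (an assumption, not the real BERT vocab) and reproduces the docstring example "unaffable" -> ["un", "##aff", "##able"].

# Stand-alone sketch of greedy longest-match-first WordPiece splitting,
# mirroring WordpieceTokenizer.tokenize with a toy vocabulary (assumed).
def wordpiece(token, vocab, unk="[UNK]"):
    chars, start, pieces = list(token), 0, []
    while start < len(chars):
        end = len(chars)
        cur = None
        while start < end:                 # shrink the candidate window from the right
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub           # continuation pieces carry the ## prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:                    # no piece matched: whole token is unknown
            return [unk]
        pieces.append(cur)
        start = end
    return pieces

assert wordpiece("unaffable", {"un", "##aff", "##able"}) == ["un", "##aff", "##able"]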
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import subprocess import os import socket from argparse import ArgumentParser, REMAINDER import torch def parse_args(): """ Helper function parsing the command line options @retval ArgumentParser """ parser = ArgumentParser(description="PyTorch distributed training launch " "helper utilty that will spawn up " "multiple distributed processes") # Optional arguments for the launch helper parser.add_argument("--nnodes", type=int, default=1, help="The number of nodes to use for distributed " "training") parser.add_argument("--node_rank", type=int, default=0, help="The rank of the node for multi-node distributed " "training") parser.add_argument("--nproc_per_node", type=int, default=1, help="The number of processes to launch on each node, " "for GPU training, this is recommended to be set " "to the number of GPUs in your system so that " "each process can be bound to a single GPU.") parser.add_argument("--master_addr", default="127.0.0.1", type=str, help="Master node (rank 0)'s address, should be either " "the IP address or the hostname of node 0, for " "single node multi-proc training, the " "--master_addr can simply be 127.0.0.1") parser.add_argument("--master_port", default=29500, type=int, help="Master node (rank 0)'s free port that needs to " "be used for communciation during distributed " "training") parser.add_argument('--no_hyperthreads', action='store_true', help='Flag to disable binding to hyperthreads') parser.add_argument('--no_membind', action='store_true', help='Flag to disable memory binding') # non-optional arguments for binding parser.add_argument("--nsockets_per_node", type=int, required=True, help="Number of CPU sockets on a node") parser.add_argument("--ncores_per_socket", type=int, required=True, help="Number of CPU cores per socket") # positional parser.add_argument("training_script", type=str, help="The full path to the single GPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script") # rest from the training program parser.add_argument('training_script_args', nargs=REMAINDER) return parser.parse_args() def main(): args = parse_args() # variables for numactrl binding NSOCKETS = args.nsockets_per_node NGPUS_PER_SOCKET = (args.nproc_per_node // args.nsockets_per_node) + (1 if (args.nproc_per_node % args.nsockets_per_node) else 0) NCORES_PER_GPU = args.ncores_per_socket // NGPUS_PER_SOCKET # world size in terms of number of processes dist_world_size = args.nproc_per_node * args.nnodes # set PyTorch distributed related environmental variables current_env = os.environ.copy() current_env["MASTER_ADDR"] = args.master_addr current_env["MASTER_PORT"] = str(args.master_port) current_env["WORLD_SIZE"] = str(dist_world_size) processes = [] for local_rank in range(0, args.nproc_per_node): # each process's rank dist_rank = args.nproc_per_node * args.node_rank + local_rank current_env["RANK"] = str(dist_rank) # form numactrl 
binding command cpu_ranges = [local_rank * NCORES_PER_GPU, (local_rank + 1) * NCORES_PER_GPU - 1, local_rank * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS), (local_rank + 1) * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS) - 1] numactlargs = [] if args.no_hyperthreads: numactlargs += [ "--physcpubind={}-{}".format(*cpu_ranges[0:2]) ] else: numactlargs += [ "--physcpubind={}-{},{}-{}".format(*cpu_ranges) ] if not args.no_membind: memnode = local_rank // NGPUS_PER_SOCKET numactlargs += [ "--membind={}".format(memnode) ] # spawn the processes cmd = [ "/usr/bin/numactl" ] \ + numactlargs \ + [ sys.executable, "-u", args.training_script, "--local_rank={}".format(local_rank) ] \ + args.training_script_args process = subprocess.Popen(cmd, env=current_env) processes.append(process) for process in processes: process.wait() if __name__ == "__main__": main()
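The launcher above pins each local rank to a contiguous block of physical cores plus the matching hyperthread siblings, and binds its memory to the local socket. A small worked sketch of that arithmetic follows; the hardware figures (2 sockets, 20 cores per socket, 8 GPUs) and the assumption that sibling hyperthreads are numbered after all physical cores are illustrative only.

# Worked sketch of the core-binding arithmetic in main() above, with assumed hardware numbers.
NSOCKETS, NCORES_PER_SOCKET, NPROC = 2, 20, 8

NGPUS_PER_SOCKET = (NPROC // NSOCKETS) + (1 if NPROC % NSOCKETS else 0)   # 4
NCORES_PER_GPU = NCORES_PER_SOCKET // NGPUS_PER_SOCKET                    # 5

for local_rank in range(NPROC):
    lo = local_rank * NCORES_PER_GPU
    hi = (local_rank + 1) * NCORES_PER_GPU - 1
    # assumed numbering: sibling hyperthreads come after all physical cores
    ht_lo = lo + NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS
    ht_hi = hi + NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS
    membind = local_rank // NGPUS_PER_SOCKET
    print(f"rank {local_rank}: --physcpubind={lo}-{hi},{ht_lo}-{ht_hi} --membind={membind}")
# rank 0: --physcpubind=0-4,40-44 --membind=0
# rank 7: --physcpubind=35-39,75-79 --membind=1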
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/bind_pyt.py
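Before spawning workers, the launcher above derives each process's global rank and the world size from the node layout and exports them through the standard PyTorch environment variables. The sketch below shows only that bookkeeping, for an assumed 2-node job with 8 processes per node.

# Sketch of the rank / world-size bookkeeping done before subprocess.Popen above.
import os

nnodes, nproc_per_node = 2, 8          # assumed job layout
world_size = nnodes * nproc_per_node   # 16 processes in total

def worker_env(node_rank, local_rank, master_addr="127.0.0.1", master_port=29500):
    env = os.environ.copy()
    env["MASTER_ADDR"] = master_addr
    env["MASTER_PORT"] = str(master_port)
    env["WORLD_SIZE"] = str(world_size)
    env["RANK"] = str(nproc_per_node * node_rank + local_rank)   # global rank
    return env

assert worker_env(node_rank=1, local_rank=3)["RANK"] == "11"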
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Extract pre-computed feature vectors from a PyTorch BERT model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import collections import logging import json import re import torch from torch.utils.data import TensorDataset, DataLoader, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tokenization import BertTokenizer from modeling import BertModel logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) class InputExample(object): def __init__(self, unique_id, text_a, text_b): self.unique_id = unique_id self.text_a = text_a self.text_b = text_b class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): self.unique_id = unique_id self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.input_type_ids = input_type_ids def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputBatch`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:(seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambigiously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append("[SEP]") input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("unique_id: %s" % (example.unique_id)) logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) features.append( InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def read_examples(input_file): """Read a list of `InputExample`s from an input file.""" examples = [] unique_id = 0 with open(input_file, "r", encoding='utf-8') as reader: while True: line = reader.readline() if not line: break line = line.strip() text_a = None text_b = None m = re.match(r"^(.*) \|\|\| (.*)$", line) if m is None: text_a = line else: text_a = m.group(1) text_b = m.group(2) examples.append( InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)) unique_id += 1 return examples def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--input_file", default=None, type=str, required=True) parser.add_argument("--output_file", default=None, type=str, required=True) parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") ## Other parameters parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--layers", default="-1,-2,-3,-4", type=str) parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
Sequences longer " "than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1), help="local_rank for distributed training on gpus") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1))) layer_indexes = [int(x) for x in args.layers.split(",")] tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) examples = read_examples(args.input_file) features = convert_examples_to_features( examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer) unique_id_to_feature = {} for feature in features: unique_id_to_feature[feature.unique_id] = feature model = BertModel.from_pretrained(args.bert_model) model.to(device) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank) elif n_gpu > 1: model = torch.nn.DataParallel(model) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index) if args.local_rank == -1: eval_sampler = SequentialSampler(eval_data) else: eval_sampler = DistributedSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) model.eval() with open(args.output_file, "w", encoding='utf-8') as writer: for input_ids, input_mask, example_indices in eval_dataloader: input_ids = input_ids.to(device) input_mask = input_mask.to(device) all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask) all_encoder_layers = all_encoder_layers for b, example_index in enumerate(example_indices): feature = features[example_index.item()] unique_id = int(feature.unique_id) # feature = unique_id_to_feature[unique_id] output_json = collections.OrderedDict() output_json["linex_index"] = unique_id all_out_features = [] for (i, token) in enumerate(feature.tokens): all_layers = [] for (j, layer_index) in enumerate(layer_indexes): layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy() layer_output = layer_output[b] layers = collections.OrderedDict() layers["index"] = layer_index layers["values"] = [ round(x.item(), 6) for x in layer_output[i] ] all_layers.append(layers) out_features = collections.OrderedDict() out_features["token"] = token out_features["layers"] = all_layers all_out_features.append(out_features) output_json["features"] = all_out_features writer.write(json.dumps(output_json) + "\n") if __name__ == "__main__": main()
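_truncate_seq_pair above trims whichever sequence is currently longer, one token at a time, until the pair fits into the budget left after [CLS], [SEP], [SEP]. A small stand-alone sketch with made-up token lists shows the effect.

# Stand-alone sketch of the longer-sequence-first truncation heuristic used above;
# the "token" lists are made up for illustration.
def truncate_pair(tokens_a, tokens_b, max_length):
    while len(tokens_a) + len(tokens_b) > max_length:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()

a = list("abcdefgh")          # 8 "tokens"
b = list("xyz")               # 3 "tokens"
truncate_pair(a, b, max_length=8)
assert (a, b) == (list("abcde"), list("xyz"))   # only the longer side was trimmed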
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/extract_features.py
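read_examples in the file above expects one example per input line, with an optional " ||| " separator between text_a and text_b. A tiny sketch of that parsing, using the same regex and hypothetical input strings, follows.

# Sketch of the ' ||| ' line format accepted by read_examples above; the sample lines are hypothetical.
import re

def parse_line(line):
    m = re.match(r"^(.*) \|\|\| (.*)$", line.strip())
    if m is None:
        return line.strip(), None          # single-sentence example
    return m.group(1), m.group(2)          # sentence-pair example

assert parse_line("Who wrote Hamlet?") == ("Who wrote Hamlet?", None)
assert parse_line("Who wrote Hamlet? ||| Shakespeare wrote it.") == \
    ("Who wrote Hamlet?", "Shakespeare wrote it.")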
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import, division, print_function import pickle import argparse import logging import os import random import wget import json import time import dllogger import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from file_utils import PYTORCH_PRETRAINED_BERT_CACHE import modeling from tokenization import BertTokenizer from optimization import BertAdam, warmup_linear from schedulers import LinearWarmUpScheduler from apex import amp from sklearn.metrics import matthews_corrcoef, f1_score from utils import (is_main_process, mkdir_by_main_process, format_step, get_world_size) from processors.glue import PROCESSORS, convert_examples_to_features torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) logger = logging.getLogger(__name__) def compute_metrics(task_name, preds, labels): assert len(preds) == len(labels) if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def simple_accuracy(preds, labels): return (preds == labels).mean() def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, } def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) from apex.multi_tensor_apply import multi_tensor_applier class GradientClipper: """ Clips gradient norm of an iterable of parameters. 
""" def __init__(self, max_grad_norm): self.max_norm = max_grad_norm if multi_tensor_applier.available: import amp_C self._overflow_buf = torch.cuda.IntTensor([0]) self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm self.multi_tensor_scale = amp_C.multi_tensor_scale else: raise RuntimeError('Gradient clipping requires cuda extensions') def step(self, parameters): l = [p.grad for p in parameters if p.grad is not None] total_norm, _ = multi_tensor_applier( self.multi_tensor_l2norm, self._overflow_buf, [l], False, ) total_norm = total_norm.item() if (total_norm == float('inf')): return clip_coef = self.max_norm / (total_norm + 1e-6) if clip_coef < 1: multi_tensor_applier( self.multi_tensor_scale, self._overflow_buf, [l, l], clip_coef, ) def parse_args(parser=argparse.ArgumentParser()): ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data " "files) for the task.", ) parser.add_argument( "--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, " "bert-base-multilingual-uncased, bert-base-multilingual-cased, " "bert-base-chinese.", ) parser.add_argument( "--task_name", default=None, type=str, required=True, choices=PROCESSORS.keys(), help="The name of the task to train.", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints " "will be written.", ) parser.add_argument( "--init_checkpoint", default=None, type=str, required=True, help="The checkpoint file from pretraining", ) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece " "tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.", ) parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to get model-task performance on the dev" " set by running eval.") parser.add_argument("--do_predict", action='store_true', help="Whether to output prediction results on the dev " "set by running eval.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Batch size per GPU for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1.0, type=float, help="Total number of training steps to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup " "for. 
E.g., 0.1 = 10%% of training.", ) parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1), help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=1, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a " "backward/update pass.") parser.add_argument( '--fp16', action='store_true', help="Mixed precision training", ) parser.add_argument( '--amp', action='store_true', help="Mixed precision training", ) parser.add_argument( '--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. Only used when " "fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n", ) parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--vocab_file', type=str, default=None, required=True, help="Vocabulary mapping/file BERT was pretrainined on") parser.add_argument("--config_file", default=None, type=str, required=True, help="The BERT model config") parser.add_argument('--skip_checkpoint', default=False, action='store_true', help="Whether to save checkpoints") return parser.parse_args() def init_optimizer_and_amp(model, learning_rate, loss_scale, warmup_proportion, num_train_optimization_steps, use_fp16): param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ { 'params': [ p for n, p in param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': 0.01 }, { 'params': [ p for n, p in param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0 }, ] optimizer, scheduler = None, None if use_fp16: logger.info("using fp16") try: from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from " "https://www.github.com/nvidia/apex to use " "distributed and fp16 training.") if num_train_optimization_steps is not None: optimizer = FusedAdam( optimizer_grouped_parameters, lr=learning_rate, bias_correction=False, ) amp_inits = amp.initialize( model, optimizers=optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale="dynamic" if loss_scale == 0 else loss_scale, ) model, optimizer = (amp_inits if num_train_optimization_steps is not None else (amp_inits, None)) if num_train_optimization_steps is not None: scheduler = LinearWarmUpScheduler( optimizer, warmup=warmup_proportion, total_steps=num_train_optimization_steps, ) else: logger.info("using fp32") if num_train_optimization_steps is not None: optimizer = BertAdam( optimizer_grouped_parameters, lr=learning_rate, warmup=warmup_proportion, t_total=num_train_optimization_steps, ) return model, optimizer, scheduler def gen_tensor_dataset(features): all_input_ids = torch.tensor( [f.input_ids for f in features], dtype=torch.long, ) all_input_mask = torch.tensor( [f.input_mask for f in features], dtype=torch.long, ) all_segment_ids = torch.tensor( [f.segment_ids for f in features], dtype=torch.long, ) all_label_ids = torch.tensor( [f.label_id for f in features], dtype=torch.long, ) return TensorDataset( all_input_ids, all_input_mask, all_segment_ids, all_label_ids, ) def 
get_train_features(data_dir, bert_model, max_seq_length, do_lower_case, local_rank, train_batch_size, gradient_accumulation_steps, num_train_epochs, tokenizer, processor): cached_train_features_file = os.path.join( data_dir, '{0}_{1}_{2}'.format( list(filter(None, bert_model.split('/'))).pop(), str(max_seq_length), str(do_lower_case), ), ) train_features = None try: with open(cached_train_features_file, "rb") as reader: train_features = pickle.load(reader) logger.info("Loaded pre-processed features from {}".format( cached_train_features_file)) except: logger.info("Did not find pre-processed features from {}".format( cached_train_features_file)) train_examples = processor.get_train_examples(data_dir) train_features, _ = convert_examples_to_features( train_examples, processor.get_labels(), max_seq_length, tokenizer, ) if is_main_process(): logger.info(" Saving train features into cached file %s", cached_train_features_file) with open(cached_train_features_file, "wb") as writer: pickle.dump(train_features, writer) return train_features def dump_predictions(path, label_map, preds, examples): label_rmap = {label_idx: label for label, label_idx in label_map.items()} predictions = { example.guid: label_rmap[preds[i]] for i, example in enumerate(examples) } with open(path, "w") as writer: json.dump(predictions, writer) def main(args): args.fp16 = args.fp16 or args.amp if args.server_ip and args.server_port: # Distant debugging - see # https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd logger.info("Waiting for debugger attach") ptvsd.enable_attach( address=(args.server_ip, args.server_port), redirect_output=True, ) ptvsd.wait_for_attach() if args.local_rank == -1 or args.no_cuda: device = torch.device( "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of # sychronizing nodes/GPUs. 
if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {}, distributed training: {}, " "16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16, )) if not args.do_train and not args.do_eval and not args.do_predict: raise ValueError("At least one of `do_train`, `do_eval` or " "`do_predict` must be True.") if is_main_process(): if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train): logger.warning("Output directory ({}) already exists and is not " "empty.".format(args.output_dir)) mkdir_by_main_process(args.output_dir) if is_main_process(): dllogger.init(backends=[ dllogger.JSONStreamBackend( verbosity=dllogger.Verbosity.VERBOSE, filename=os.path.join(args.output_dir, 'dllogger.json'), ), dllogger.StdOutBackend( verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step, ), ]) else: dllogger.init(backends=[]) dllogger.metadata("e2e_train_time", {"unit": "s"}) dllogger.metadata("training_sequences_per_second", {"unit": "sequences/s"}) dllogger.metadata("e2e_inference_time", {"unit": "s"}) dllogger.metadata("inference_sequences_per_second", {"unit": "sequences/s"}) dllogger.metadata("exact_match", {"unit": None}) dllogger.metadata("F1", {"unit": None}) dllogger.log(step="PARAMETER", data={"Config": [str(args)]}) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, " "should be >= 1".format( args.gradient_accumulation_steps)) if args.gradient_accumulation_steps > args.train_batch_size: raise ValueError("gradient_accumulation_steps ({}) cannot be larger " "train_batch_size ({}) - there cannot be a fraction " "of one sample.".format( args.gradient_accumulation_steps, args.train_batch_size, )) args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) dllogger.log(step="PARAMETER", data={"SEED": args.seed}) processor = PROCESSORS[args.task_name]() num_labels = len(processor.get_labels()) #tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) tokenizer = BertTokenizer( args.vocab_file, do_lower_case=args.do_lower_case, max_len=512, ) # for bert large num_train_optimization_steps = None if args.do_train: train_features = get_train_features( args.data_dir, args.bert_model, args.max_seq_length, args.do_lower_case, args.local_rank, args.train_batch_size, args.gradient_accumulation_steps, args.num_train_epochs, tokenizer, processor, ) num_train_optimization_steps = int( len(train_features) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs if args.local_rank != -1: num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size()) # Prepare model config = modeling.BertConfig.from_json_file(args.config_file) # Padding for divisibility by 8 if config.vocab_size % 8 != 0: config.vocab_size += 8 - (config.vocab_size % 8) # modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training model = modeling.BertForSequenceClassification( config, num_labels=num_labels, ) logger.info("USING CHECKPOINT from {}".format(args.init_checkpoint)) checkpoint = torch.load(args.init_checkpoint, map_location='cpu') checkpoint = checkpoint["model"] if "model" in checkpoint.keys() else checkpoint model.load_state_dict(checkpoint, strict=False) logger.info("USED CHECKPOINT from 
{}".format(args.init_checkpoint)) dllogger.log( step="PARAMETER", data={ "num_parameters": sum([p.numel() for p in model.parameters() if p.requires_grad]), }, ) model.to(device) # Prepare optimizer model, optimizer, scheduler = init_optimizer_and_amp( model, args.learning_rate, args.loss_scale, args.warmup_proportion, num_train_optimization_steps, args.fp16, ) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Please install apex from " "https://www.github.com/nvidia/apex to use " "distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) loss_fct = torch.nn.CrossEntropyLoss() results = {} if args.do_train: logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_features)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) train_data = gen_tensor_dataset(train_features) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader( train_data, sampler=train_sampler, batch_size=args.train_batch_size, ) global_step = 0 nb_tr_steps = 0 tr_loss = 0 latency_train = 0.0 nb_tr_examples = 0 model.train() tic_train = time.perf_counter() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss, nb_tr_steps = 0, 0 for step, batch in enumerate( tqdm(train_dataloader, desc="Iteration")): if args.max_steps > 0 and global_step > args.max_steps: break batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch logits = model(input_ids, segment_ids, input_mask) loss = loss_fct( logits.view(-1, num_labels), label_ids.view(-1), ) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. 
if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: # modify learning rate with special warm up for BERT # which FusedAdam doesn't do scheduler.step() optimizer.step() optimizer.zero_grad() global_step += 1 latency_train = time.perf_counter() - tic_train tr_loss = tr_loss / nb_tr_steps results.update({ 'global_step': global_step, 'train:loss': tr_loss, 'train:latency': latency_train, 'train:num_samples_per_gpu': nb_tr_examples, 'train:num_steps': nb_tr_steps, 'train:throughput': get_world_size() * nb_tr_examples / latency_train, }) if is_main_process() and not args.skip_checkpoint: model_to_save = model.module if hasattr(model, 'module') else model torch.save( {"model": model_to_save.state_dict()}, os.path.join(args.output_dir, modeling.WEIGHTS_NAME), ) with open( os.path.join(args.output_dir, modeling.CONFIG_NAME), 'w', ) as f: f.write(model_to_save.config.to_json_string()) if (args.do_eval or args.do_predict) and is_main_process(): eval_examples = processor.get_dev_examples(args.data_dir) eval_features, label_map = convert_examples_to_features( eval_examples, processor.get_labels(), args.max_seq_length, tokenizer, ) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) eval_data = gen_tensor_dataset(eval_features) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader( eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, ) model.eval() preds = None out_label_ids = None eval_loss = 0 nb_eval_steps, nb_eval_examples = 0, 0 cuda_events = [(torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)) for _ in range(len(eval_dataloader))] for i, (input_ids, input_mask, segment_ids, label_ids) in tqdm( enumerate(eval_dataloader), desc="Evaluating", ): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): cuda_events[i][0].record() logits = model(input_ids, segment_ids, input_mask) cuda_events[i][1].record() if args.do_eval: eval_loss += loss_fct( logits.view(-1, num_labels), label_ids.view(-1), ).mean().item() nb_eval_steps += 1 nb_eval_examples += input_ids.size(0) if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = label_ids.detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, label_ids.detach().cpu().numpy(), axis=0, ) torch.cuda.synchronize() eval_latencies = [ event_start.elapsed_time(event_end) for event_start, event_end in cuda_events ] eval_latencies = list(sorted(eval_latencies)) def infer_latency_sli(threshold): index = int(len(eval_latencies) * threshold) - 1 index = min(max(index, 0), len(eval_latencies) - 1) return eval_latencies[index] eval_throughput = (args.eval_batch_size / (np.mean(eval_latencies) / 1000)) results.update({ 'eval:num_samples_per_gpu': nb_eval_examples, 'eval:num_steps': nb_eval_steps, 'infer:latency(ms):50%': infer_latency_sli(0.5), 'infer:latency(ms):90%': infer_latency_sli(0.9), 'infer:latency(ms):95%': infer_latency_sli(0.95), 'infer:latency(ms):99%': infer_latency_sli(0.99), 
'infer:latency(ms):100%': infer_latency_sli(1.0), 'infer:latency(ms):avg': np.mean(eval_latencies), 'infer:latency(ms):std': np.std(eval_latencies), 'infer:latency(ms):sum': np.sum(eval_latencies), 'infer:throughput(samples/s):avg': eval_throughput, }) preds = np.argmax(preds, axis=1) if args.do_predict: dump_predictions( os.path.join(args.output_dir, 'predictions.json'), label_map, preds, eval_examples, ) if args.do_eval: results['eval:loss'] = eval_loss / nb_eval_steps eval_result = compute_metrics(args.task_name, preds, out_label_ids) results.update(eval_result) if is_main_process(): logger.info("***** Results *****") for key in sorted(results.keys()): logger.info(" %s = %s", key, str(results[key])) with open(os.path.join(args.output_dir, "results.txt"), "w") as writer: json.dump(results, writer) dllogger_queries_from_results = { 'exact_match': 'acc', 'F1': 'f1', 'e2e_train_time': 'train:latency', 'training_sequences_per_second': 'train:throughput', 'e2e_inference_time': ('infer:latency(ms):sum', lambda x: x / 1000), 'inference_sequences_per_second': 'infer:throughput(samples/s):avg', } for key, query in dllogger_queries_from_results.items(): results_key, convert = (query if isinstance(query, tuple) else (query, lambda x: x)) if results_key not in results: continue dllogger.log( step=tuple(), data={key: convert(results[results_key])}, ) dllogger.flush() return results if __name__ == "__main__": main(parse_args())
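run_glue.py above divides the per-GPU batch size by gradient_accumulation_steps and steps the optimizer only once per accumulation window, so the number of optimizer updates depends on dataset size, epochs, accumulation, and world size. The sketch below only reproduces that arithmetic, with assumed numbers.

# Sketch of the batch-size / optimizer-step arithmetic in main() above,
# with assumed numbers (8,000 examples, batch 32, 2 accumulation steps, 3 epochs, 8 GPUs).
num_examples, train_batch_size, grad_acc, epochs, world_size = 8000, 32, 2, 3, 8

micro_batch = train_batch_size // grad_acc                    # per forward/backward pass: 16
steps_per_epoch = int(num_examples / micro_batch / grad_acc)  # optimizer updates per epoch: 250
optim_steps = steps_per_epoch * epochs // world_size          # per-rank updates: 93
effective_batch = micro_batch * grad_acc * world_size         # samples per optimizer update: 256
print(micro_batch, optim_steps, effective_batch)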
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/run_glue.py
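During evaluation the script above records one CUDA-event latency per batch and reports percentile SLIs by indexing into the sorted list. A tiny stand-alone version of that indexing follows, fed with made-up latencies.

# Stand-alone version of the percentile indexing behind the infer:latency(ms) SLIs above;
# the latency values are made up.
latencies_ms = sorted([12.1, 11.8, 12.4, 13.0, 25.7, 12.2, 12.0, 11.9, 12.3, 12.5])

def latency_sli(threshold):
    index = int(len(latencies_ms) * threshold) - 1
    index = min(max(index, 0), len(latencies_ms) - 1)
    return latencies_ms[index]

assert latency_sli(0.5) == 12.2    # median batch
assert latency_sli(0.9) == 13.0    # 90th-percentile batch
assert latency_sli(1.0) == 25.7    # worst batch (the straggler)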
# coding=utf-8 # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ #from fused_adam_local import FusedAdam from apex.optimizers import FusedAdam from apex.multi_tensor_apply import multi_tensor_applier import amp_C from utils import is_main_process multi_tensor_l2norm = amp_C.multi_tensor_l2norm lamb_compute_update = amp_C.multi_tensor_lamb_stage1_cuda lamb_apply_update = amp_C.multi_tensor_lamb_stage2_cuda scale = amp_C.multi_tensor_scale def warmup_cosine(x, warmup=0.002): if x < warmup: return x/warmup return 0.5 * (1.0 + torch.cos(math.pi * x)) def warmup_constant(x, warmup=0.002): if x < warmup: return x/warmup return 1.0 def warmup_linear(x, warmup=0.002): if x < warmup: return x/warmup return max((x - 1. )/ (warmup - 1.), 0.) def warmup_poly(x, warmup=0.002, degree=0.5): if x < warmup: return x/warmup return (1.0 - x)**degree SCHEDULES = { 'warmup_cosine':warmup_cosine, 'warmup_constant':warmup_constant, 'warmup_linear':warmup_linear, 'warmup_poly':warmup_poly, } class BertAdam(Optimizer): """Implements BERT version of Adam algorithm with weight decay fix. Params: lr: learning rate warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 t_total: total number of training steps for the learning rate schedule, -1 means constant learning rate. Default: -1 schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' b1: Adams b1. Default: 0.9 b2: Adams b2. Default: 0.999 e: Adams epsilon. Default: 1e-6 weight_decay: Weight decay. Default: 0.01 max_grad_norm: Maximum norm for the gradients (-1 means no clipping). 
Default: 1.0 """ def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0): if lr is not required and lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if schedule not in SCHEDULES: raise ValueError("Invalid schedule parameter: {}".format(schedule)) if not 0.0 <= warmup < 1.0 and not warmup == -1: raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) if not 0.0 <= b1 < 1.0: raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) if not 0.0 <= b2 < 1.0: raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) if not e >= 0.0: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(BertAdam, self).__init__(params, defaults) def get_lr(self): lr = [] for group in self.param_groups: for p in group['params']: state = self.state[p] if len(state) == 0: return [0] if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] lr.append(lr_scheduled) return lr def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['next_m'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['next_v'] = torch.zeros_like(p.data) next_m, next_v = state['next_m'], state['next_v'] beta1, beta2 = group['b1'], group['b2'] # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm'], error_if_nonfinite=False) # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group['e']) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if group['weight_decay'] > 0.0: update += group['weight_decay'] * p.data if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] update_with_lr = lr_scheduled * update p.data.add_(-update_with_lr) state['step'] += 1 return loss
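The SCHEDULES table above maps training progress x = step / t_total to a learning-rate multiplier; warmup_linear, for example, ramps up to 1.0 over the warmup fraction and then decays linearly back to 0. A few spot values, computed with the same formula under an assumed warmup of 0.1, are shown below.

# Spot-check of the warmup_linear multiplier defined above (progress x = step / t_total).
def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x / warmup
    return max((x - 1.0) / (warmup - 1.0), 0.0)

for x in (0.0, 0.05, 0.1, 0.55, 1.0):
    print(x, round(warmup_linear(x, warmup=0.1), 3))
# 0.0 -> 0.0, 0.05 -> 0.5 (half-way through warmup), 0.1 -> 1.0 (peak),
# 0.55 -> 0.5 (half-way through the decay), 1.0 -> 0.0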
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/optimization.py
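BertAdam.step in the file above applies an Adam-style moment update without bias correction and adds decoupled weight decay to the update before scaling by the scheduled learning rate. A scalar sketch of one such update, mirroring that loop with made-up numbers, reads:

# Scalar sketch of one BertAdam update (no bias correction, decoupled weight decay);
# all numbers are made up for illustration.
import math

p, grad, m, v = 0.5, 0.2, 0.0, 0.0                  # parameter, gradient, first/second moments
b1, b2, eps, weight_decay, lr = 0.9, 0.999, 1e-6, 0.01, 2e-5

m = b1 * m + (1 - b1) * grad                        # next_m
v = b2 * v + (1 - b2) * grad * grad                 # next_v
update = m / (math.sqrt(v) + eps) + weight_decay * p
p = p - lr * update
print(m, v, p)    # m = 0.02, v = 4e-05, p ~ 0.49994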
# coding=utf-8 # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run BERT on SQuAD.""" from __future__ import absolute_import, division, print_function import argparse import collections import json import logging import math import os import random import sys from io import open from pathlib import Path import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from apex import amp from schedulers import LinearWarmUpScheduler from file_utils import PYTORCH_PRETRAINED_BERT_CACHE import modeling from optimization import BertAdam, warmup_linear from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize) from utils import is_main_process, format_step import dllogger, time torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) if sys.version_info[0] == 2: import cPickle as pickle else: import pickle logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) class SquadExample(object): """ A single training/test example for the Squad dataset. For examples without an answer, the start and end position are -1. 
""" def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (self.qas_id) s += ", question_text: %s" % ( self.question_text) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.end_position: s += ", end_position: %d" % (self.end_position) if self.is_impossible: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. 
'%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 features = [] for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible)) unique_id += 1 return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electronics? # Context: The Japanese electronics industry is the largest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). 
# # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def get_answers(examples, features, results, args): predictions = collections.defaultdict(list) # it is possible that one example corresponds to multiple features Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit']) if args.version_2_with_negative: null_vals = collections.defaultdict(lambda: (float("inf"),0,0)) for ex, feat, result in match_results(examples, features, results): start_indices = _get_best_indices(result.start_logits, args.n_best_size) end_indices = _get_best_indices(result.end_logits, args.n_best_size) prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, feat, result, args) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) if args.version_2_with_negative: score = result.start_logits[0] + result.end_logits[0] if score < null_vals[ex.qas_id][0]: null_vals[ex.qas_id] = (score, result.start_logits[0], result.end_logits[0]) curr_predictions = [] seen_predictions = [] for pred in prelim_predictions: if len(curr_predictions) == args.n_best_size: break if pred.start_index > 0: # this is a non-null prediction TODO: this probably is irrelevant final_text = get_answer_text(ex, feat, pred, args) if final_text in seen_predictions: continue else: final_text = "" seen_predictions.append(final_text) curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit)) predictions[ex.qas_id] += curr_predictions # Add an empty prediction for each question, using that question's own null logits # (not the stale `ex` left over from the loop above) if args.version_2_with_negative: for qas_id in predictions.keys(): predictions[qas_id].append(Prediction('', null_vals[qas_id][1], null_vals[qas_id][2])) nbest_answers = collections.defaultdict(list) answers = {} for qas_id, preds in predictions.items(): nbest = sorted( preds, key=lambda x: (x.start_logit + x.end_logit), reverse=True)[:args.n_best_size] # In very rare edge cases we could only have a single null prediction. # So we just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append(Prediction(text="empty", start_logit=0.0, end_logit=0.0)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry and entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_answers[qas_id].append(output) if args.version_2_with_negative: score_diff = null_vals[qas_id][0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit if score_diff > args.null_score_diff_threshold: answers[qas_id] = "" else: answers[qas_id] = best_non_null_entry.text else: answers[qas_id] = nbest_answers[qas_id][0]['text'] return answers, nbest_answers def get_answer_text(example, feature, pred, args): tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, args.do_lower_case, args.verbose_logging) return final_text def get_valid_prelim_predictions(start_indices, end_indices, feature, result, args): _PrelimPrediction = collections.namedtuple( "PrelimPrediction", ["start_index", "end_index", "start_logit", "end_logit"]) prelim_predictions = [] for start_index in start_indices: for end_index in end_indices: if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > args.max_answer_length: continue prelim_predictions.append( _PrelimPrediction( start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) return prelim_predictions def match_results(examples, features, results): unique_f_ids = set([f.unique_id for f in features]) unique_r_ids = set([r.unique_id for r in results]) matching_ids = unique_f_ids & unique_r_ids features = [f for f in features if f.unique_id in matching_ids] results = [r for r in results if r.unique_id in matching_ids] features.sort(key=lambda x: x.unique_id) results.sort(key=lambda x: x.unique_id) for f, r in zip(features, results): #original code assumes strict ordering of examples. TODO: rewrite this yield examples[f.example_index], f, r def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. 
# # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases, in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. 
tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indices(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indices = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indices.append(index_and_score[i][0]) return best_indices def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs from apex.multi_tensor_apply import multi_tensor_applier class GradientClipper: """ Clips gradient norm of an iterable of parameters. """ def __init__(self, max_grad_norm): self.max_norm = max_grad_norm if multi_tensor_applier.available: import amp_C self._overflow_buf = torch.cuda.IntTensor([0]) self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm self.multi_tensor_scale = amp_C.multi_tensor_scale else: raise RuntimeError('Gradient clipping requires cuda extensions') def step(self, parameters): l = [p.grad for p in parameters if p.grad is not None] total_norm, _ = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [l], False) total_norm = total_norm.item() if (total_norm == float('inf')): return clip_coef = self.max_norm / (total_norm + 1e-6) if clip_coef < 1: multi_tensor_applier(self.multi_tensor_scale, self._overflow_buf, [l, l], clip_coef) def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, bert-base-chinese.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") parser.add_argument("--init_checkpoint", default=None, type=str, required=True, help="The checkpoint file from pretraining") ## Other parameters parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1.0, type=float, help="Total number of training steps to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% " "of training.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json " "output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--do_lower_case", action='store_true', help="Whether to lower case the input text. True for uncased models, False for cased models.") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1), help="local_rank for distributed training on gpus") parser.add_argument('--fp16', default=False, action='store_true', help="Mixed precision training") parser.add_argument('--amp', default=False, action='store_true', help="Mixed precision training") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 is set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.') parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold, predict null.") parser.add_argument('--vocab_file', type=str, default=None, required=True, help="Vocabulary mapping/file BERT was pretrained on") parser.add_argument("--config_file", default=None, type=str, required=True, help="The BERT model config") parser.add_argument('--log_freq', type=int, default=50, help='frequency of logging loss.') parser.add_argument('--json-summary', type=str, default="results/dllogger.json", help='If provided, the json summary will be written to ' 'the specified file.') parser.add_argument("--eval_script", help="Script to evaluate squad predictions", default="evaluate.py", type=str) parser.add_argument("--do_eval", action='store_true', help="Whether to evaluate the accuracy of the predictions") parser.add_argument("--use_env", action='store_true', help="Whether to read local rank from ENVVAR") parser.add_argument('--skip_checkpoint', default=False, action='store_true', help="Whether to skip saving checkpoints") parser.add_argument('--disable-progress-bar', default=False, action='store_true', help='Disable tqdm progress bar') parser.add_argument("--skip_cache", default=False, action='store_true', help="Whether to skip caching train features") parser.add_argument("--cache_dir", default=None, type=str, help="Location to cache train features. Will default to the dataset directory") parser.add_argument("--profile", default=False, action='store_true', help="Whether to profile model.") args = parser.parse_args() args.fp16 = args.fp16 or args.amp if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl', init_method='env://') n_gpu = 1 if is_main_process(): Path(os.path.dirname(args.json_summary)).mkdir(parents=True, exist_ok=True) dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.json_summary), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) else: dllogger.init(backends=[]) dllogger.metadata("e2e_train_time", {"unit": "s"}) dllogger.metadata("training_sequences_per_second", {"unit": "sequences/s"}) dllogger.metadata("final_loss", {"unit": None}) dllogger.metadata("e2e_inference_time", {"unit": "s"}) dllogger.metadata("inference_sequences_per_second", {"unit": "sequences/s"}) dllogger.metadata("exact_match", {"unit": None}) dllogger.metadata("F1", {"unit": None}) print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) dllogger.log(step="PARAMETER", data={"Config": [str(args)]}) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps random.seed(args.seed) 
np.random.seed(args.seed) torch.manual_seed(args.seed) dllogger.log(step="PARAMETER", data={"SEED": args.seed}) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_predict: raise ValueError("At least one of `do_train` or `do_predict` must be True.") if args.do_train: if not args.train_file: raise ValueError( "If `do_train` is True, then `train_file` must be specified.") if args.do_predict: if not args.predict_file: raise ValueError( "If `do_predict` is True, then `predict_file` must be specified.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and os.listdir(args.output_dir)!=['logfile.txt']: print("WARNING: Output directory {} already exists and is not empty.".format(args.output_dir), os.listdir(args.output_dir)) if not os.path.exists(args.output_dir) and is_main_process(): os.makedirs(args.output_dir) tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large # tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) train_examples = None num_train_optimization_steps = None if args.do_train: train_examples = read_squad_examples( input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative) num_train_optimization_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare model config = modeling.BertConfig.from_json_file(args.config_file) # Padding for divisibility by 8 if config.vocab_size % 8 != 0: config.vocab_size += 8 - (config.vocab_size % 8) model = modeling.BertForQuestionAnswering(config) # model = modeling.BertForQuestionAnswering.from_pretrained(args.bert_model, # cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))) dllogger.log(step="PARAMETER", data={"loading_checkpoint": True}) checkpoint = torch.load(args.init_checkpoint, map_location='cpu') checkpoint = checkpoint["model"] if "model" in checkpoint.keys() else checkpoint model.load_state_dict(checkpoint, strict=False) dllogger.log(step="PARAMETER", data={"loaded_checkpoint": True}) model.to(device) num_weights = sum([p.numel() for p in model.parameters() if p.requires_grad]) dllogger.log(step="PARAMETER", data={"model_weights_num":num_weights}) # Prepare optimizer param_optimizer = list(model.named_parameters()) # hack to remove the pooler, which is not used # and thus produces None grads that break apex param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.do_train: if args.fp16: try: from apex.optimizers import FusedAdam except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False) if args.loss_scale == 0: model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale="dynamic") else: model, optimizer = amp.initialize(model, optimizer, opt_level="O2", 
keep_batchnorm_fp32=False, loss_scale=args.loss_scale) if args.do_train: scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=num_train_optimization_steps) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) global_step = 0 if args.do_train: if args.cache_dir is None: cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format( list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length)) else: cached_train_features_file = args.cache_dir.strip('/') + '/' + args.train_file.split('/')[-1] + '_{0}_{1}_{2}_{3}'.format( list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length)) train_features = None try: with open(cached_train_features_file, "rb") as reader: train_features = pickle.load(reader) except: train_features = convert_examples_to_features( examples=train_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=True) if not args.skip_cache and is_main_process(): dllogger.log(step="PARAMETER", data={"Cached_train features_file": cached_train_features_file}) with open(cached_train_features_file, "wb") as writer: pickle.dump(train_features, writer) dllogger.log(step="PARAMETER", data={"train_start": True}) dllogger.log(step="PARAMETER", data={"training_samples": len(train_examples)}) dllogger.log(step="PARAMETER", data={"training_features": len(train_features)}) dllogger.log(step="PARAMETER", data={"train_batch_size":args.train_batch_size}) dllogger.log(step="PARAMETER", data={"steps":num_train_optimization_steps}) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size * n_gpu) model.train() gradClipper = GradientClipper(max_grad_norm=1.0) final_loss = None train_start = time.time() for epoch in range(int(args.num_train_epochs)): train_iter = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader for step, batch in enumerate(train_iter): # Terminate early for benchmarking if args.max_steps > 0 and global_step > args.max_steps: break if n_gpu == 1: batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self input_ids, input_mask, segment_ids, start_positions, end_positions = batch start_logits, end_logits = model(input_ids, 
segment_ids, input_mask) # If we are on multi-GPU, split adds a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs; we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) loss = (start_loss + end_loss) / 2 if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() # gradient clipping gradClipper.step(amp.master_params(optimizer)) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16 : # modify learning rate with special warm up for BERT which FusedAdam doesn't do scheduler.step() optimizer.step() optimizer.zero_grad() global_step += 1 final_loss = loss.item() if step % args.log_freq == 0: dllogger.log(step=(epoch, global_step,), data={"step_loss": final_loss, "learning_rate": optimizer.param_groups[0]['lr']}) time_to_train = time.time() - train_start if args.do_train and is_main_process() and not args.skip_checkpoint: # Save a trained model and the associated configuration model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself output_model_file = os.path.join(args.output_dir, modeling.WEIGHTS_NAME) torch.save({"model":model_to_save.state_dict()}, output_model_file) output_config_file = os.path.join(args.output_dir, modeling.CONFIG_NAME) with open(output_config_file, 'w') as f: f.write(model_to_save.config.to_json_string()) if args.do_predict and (args.local_rank == -1 or is_main_process()): if not args.do_train and args.fp16: model.half() eval_examples = read_squad_examples( input_file=args.predict_file, is_training=False, version_2_with_negative=args.version_2_with_negative) eval_features = convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=False) dllogger.log(step="PARAMETER", data={"infer_start": True}) dllogger.log(step="PARAMETER", data={"eval_samples": len(eval_examples)}) dllogger.log(step="PARAMETER", data={"eval_features": len(eval_features)}) dllogger.log(step="PARAMETER", data={"predict_batch_size": args.predict_batch_size}) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size) infer_start = time.time() model.eval() all_results = [] dllogger.log(step="PARAMETER", data={"eval_start": True}) for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=args.disable_progress_bar): 
if len(all_results) % 1000 == 0: dllogger.log(step="PARAMETER", data={"sample_number": len(all_results)}) input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) with torch.no_grad(): batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask) for i, example_index in enumerate(example_indices): start_logits = batch_start_logits[i].detach().cpu().tolist() end_logits = batch_end_logits[i].detach().cpu().tolist() eval_feature = eval_features[example_index.item()] unique_id = int(eval_feature.unique_id) all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) time_to_infer = time.time() - infer_start output_prediction_file = os.path.join(args.output_dir, "predictions.json") output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json") answers, nbest_answers = get_answers(eval_examples, eval_features, all_results, args) with open(output_prediction_file, "w") as f: f.write(json.dumps(answers, indent=4) + "\n") with open(output_nbest_file, "w") as f: f.write(json.dumps(nbest_answers, indent=4) + "\n") # output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json") # write_predictions(eval_examples, eval_features, all_results, # args.n_best_size, args.max_answer_length, # args.do_lower_case, output_prediction_file, # output_nbest_file, output_null_log_odds_file, args.verbose_logging, # args.version_2_with_negative, args.null_score_diff_threshold) if args.do_eval and is_main_process(): import sys import subprocess eval_out = subprocess.check_output([sys.executable, args.eval_script, args.predict_file, args.output_dir + "/predictions.json"]) scores = str(eval_out).strip() exact_match = float(scores.split(":")[1].split(",")[0]) f1 = float(scores.split(":")[2].split("}")[0]) if args.do_train: gpu_count = n_gpu if torch.distributed.is_initialized(): gpu_count = torch.distributed.get_world_size() if args.max_steps == -1: dllogger.log(step=tuple(), data={"e2e_train_time": time_to_train, "training_sequences_per_second": len(train_features) * args.num_train_epochs / time_to_train, "final_loss": final_loss}) else: dllogger.log(step=tuple(), data={"e2e_train_time": time_to_train, "training_sequences_per_second": args.train_batch_size * args.gradient_accumulation_steps \ * args.max_steps * gpu_count / time_to_train, "final_loss": final_loss}) if args.do_predict and is_main_process(): dllogger.log(step=tuple(), data={"e2e_inference_time": time_to_infer, "inference_sequences_per_second": len(eval_features) / time_to_infer}) if args.do_eval and is_main_process(): dllogger.log(step=tuple(), data={"exact_match": exact_match, "F1": f1}) if __name__ == "__main__": main() dllogger.flush()
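# ---------------------------------------------------------------------------
# A hedged usage sketch (not part of the original script): the flags below are
# the ones defined by main()'s argument parser above; every path and file name
# is a placeholder and must be replaced with real artifacts.
#
#   python run_squad.py \
#       --bert_model bert-large-uncased \
#       --init_checkpoint /path/to/pretrained_ckpt.pt \
#       --config_file /path/to/bert_config.json \
#       --vocab_file /path/to/vocab.txt \
#       --train_file train-v1.1.json \
#       --predict_file dev-v1.1.json \
#       --do_train --do_predict --do_eval --do_lower_case \
#       --train_batch_size 32 --learning_rate 3e-5 --num_train_epochs 2 \
#       --fp16 \
#       --output_dir /results
#
# With --fp16 the script initializes apex AMP at opt_level O2 and uses FusedAdam
# with a LinearWarmUpScheduler, as shown in main() above.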
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/run_squad.py
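# A minimal, self-contained sketch (not from the repository) of the sliding-window
# logic that convert_examples_to_features and _check_is_max_context in run_squad.py
# above implement: documents longer than the span budget are cut into overlapping
# windows, and each token is scored by the smaller of its left and right context
# within a window. Function and variable names here are illustrative only.
import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    """Cut a document of `num_tokens` tokens into overlapping windows."""
    spans = []
    start = 0
    while start < num_tokens:
        length = min(max_tokens_for_doc, num_tokens - start)
        spans.append(DocSpan(start=start, length=length))
        if start + length == num_tokens:
            break
        start += min(length, doc_stride)
    return spans

def max_context_score(span, position):
    """Score a token position inside a span by its minimum one-sided context."""
    end = span.start + span.length - 1
    left = position - span.start
    right = end - position
    return min(left, right) + 0.01 * span.length

# Worked example mirroring the comment in _check_is_max_context: the 12-token
# document "the man went to the store and bought a gallon of milk", 5-token
# windows, stride 3. Token 7 ("bought") has 4 left / 0 right context in the
# window starting at 3, but 1 left / 3 right in the window starting at 6, so
# the later window is the one whose score is kept.
spans = make_doc_spans(num_tokens=12, max_tokens_for_doc=5, doc_stride=3)
best = max(
    (s for s in spans if s.start <= 7 <= s.start + s.length - 1),
    key=lambda s: max_context_score(s, 7),
)
print(spans)
print(best, max_context_score(best, 7))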
# coding=utf-8 # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import logging import os import unicodedata import six from io import open from file_utils import cached_path logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, } VOCAB_NAME = 'vocab.txt' def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r", encoding="utf-8") as reader: while True: token = reader.readline() if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: raise ValueError( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file.""" index = 0 if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_NAME) with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!".format(vocab_file)) index = token_index writer.write(token + u'\n') index += 1 return vocab_file @classmethod def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] else: vocab_file = pretrained_model_name_or_path if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. 
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case self.never_split = never_split def tokenize(self, text): """Tokenizes a piece of text.""" text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case and token not in self.never_split: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" if text in self.never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
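# A hedged usage sketch (not part of the original module) exercising the classes
# defined above. "vocab.txt" is a placeholder path; the sub-token output depends
# entirely on the vocabulary file supplied.
if __name__ == "__main__":
    tokenizer = BertTokenizer("vocab.txt", do_lower_case=True, max_len=512)
    tokens = tokenizer.tokenize("The leader was John Smith (1895-1943).")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokens)
    print(ids)
    print(tokenizer.convert_ids_to_tokens(ids))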
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/tokenization.py
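# A compact, self-contained sketch (not from the repository) of the greedy
# longest-match-first WordPiece algorithm that WordpieceTokenizer.tokenize in
# tokenization.py above describes: repeatedly take the longest vocabulary entry
# that prefixes the remaining characters, marking continuations with "##".
# The toy vocabulary below is illustrative only.
def wordpiece(token, vocab, unk_token="[UNK]"):
    pieces = []
    start = 0
    while start < len(token):
        end = len(token)
        cur = None
        while start < end:
            sub = token[start:end]
            if start > 0:
                sub = "##" + sub
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk_token]  # no prefix matched: the whole token is unknown
        pieces.append(cur)
        start = end
    return pieces

# Mirrors the docstring example: "unaffable" -> ["un", "##aff", "##able"].
toy_vocab = {"un", "##aff", "##able", "aff", "able"}
print(wordpiece("unaffable", toy_vocab))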
# coding=utf-8 # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # ================== import csv import os import time import argparse import random import logging import h5py from tqdm import tqdm, trange from typing import Final, Any, Callable import os import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset from torch.utils.data.distributed import DistributedSampler import torch.distributed as dist import math import modeling from schedulers import PolyWarmUpScheduler from lamb_amp_opt.fused_lamb import FusedLAMBAMP from file_utils import PYTORCH_PRETRAINED_BERT_CACHE from utils import is_main_process, format_step, get_world_size, get_rank from torch.nn.parallel import DistributedDataParallel as DDP from schedulers import LinearWarmUpScheduler import dllogger import lddl.torch # Enabling the TorchScript Runtime Backend NVFuser torch._C._jit_set_nvfuser_enabled(True) torch._C._jit_set_texpr_fuser_enabled(False) torch._C._jit_override_can_fuse_on_cpu(False) torch._C._jit_override_can_fuse_on_gpu(False) torch._C._jit_set_bailout_depth(20) # Track whether a SIGTERM (cluster time up) has been handled timeout_sent = False import signal # handle SIGTERM sent from the scheduler and mark so we # can gracefully save & exit def signal_handler(sig, frame): global timeout_sent timeout_sent = True signal.signal(signal.SIGTERM, signal_handler) class BertPretrainingCriterion(torch.nn.Module): sequence_output_is_dense: Final[bool] def __init__(self, vocab_size, sequence_output_is_dense=False): super(BertPretrainingCriterion, self).__init__() self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1) self.vocab_size = vocab_size self.sequence_output_is_dense = sequence_output_is_dense def forward(self, prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels): if self.sequence_output_is_dense: # prediction_scores are already dense masked_lm_labels_flat = masked_lm_labels.view(-1) mlm_labels = masked_lm_labels_flat[masked_lm_labels_flat != -1] masked_lm_loss = self.loss_fn(prediction_scores.view(-1, self.vocab_size), mlm_labels.view(-1)) else: masked_lm_loss = self.loss_fn(prediction_scores.view(-1, self.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = self.loss_fn(seq_relationship_score.view(-1, 2), next_sentence_labels.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss class SyncFreeStats : def __init__(self) : self.host_stats = {} self.device_stats = {} self.device_funcs = {} def add_stat(self, name, dtype=torch.int32, device_tensor=None, device_func=None) : if device_tensor is not None : assert dtype == device_tensor.dtype, "Error: dtype do not match: {} {}".format(dtype, device_tensor.dtype) self.host_stats[name] = 
torch.zeros(1, dtype=dtype).pin_memory() self.device_stats[name] = device_tensor self.device_funcs[name] = device_func def copy_from_device(self) : for name in self.host_stats.keys() : # Apply device function to device stat if self.device_stats[name] is not None and self.device_funcs[name] is not None: self.host_stats[name].copy_(self.device_funcs[name](self.device_stats[name]), non_blocking=True) elif self.device_stats[name] is not None : self.host_stats[name].copy_(self.device_stats[name], non_blocking=True) elif self.device_funcs[name] is not None : self.host_stats[name].copy_(self.device_funcs[name](), non_blocking=True) def host_stat(self, name) : assert name in self.host_stats return self.host_stats[name] def host_stat_value(self, name) : assert name in self.host_stats return self.host_stats[name].item() def update_host_stat(self, name, tensor) : self.host_stats[name] = tensor def device_stat(self, name) : assert self.device_stats[name] is not None return self.device_stats[name] def update_device_stat(self, name, tensor) : self.device_stats[name] = tensor def parse_arguments(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--input_dir", default=None, type=str, required=True, help="The input data dir. Should contain .parquet files for the task.") parser.add_argument("--config_file", default=None, type=str, required=True, help="The BERT model config") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints will be written.") parser.add_argument('--vocab_file', type=str, default=None, required=True, help="Vocabulary mapping/file BERT was pretrainined on") ## Other parameters parser.add_argument("--init_checkpoint", default=None, type=str, help="The initial checkpoint to start training from.") parser.add_argument("--max_seq_length", default=512, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--max_predictions_per_seq", default=80, type=int, help="The maximum total of masked tokens in input sequence") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=1000, type=float, help="Total number of training steps to perform.") parser.add_argument("--warmup_proportion", default=0.01, type=float, help="Proportion of training to perform linear learning rate warmup for. 
" "E.g., 0.1 = 10%% of training.") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1), help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumualte before performing a backward/update pass.") parser.add_argument('--fp16', default=False, action='store_true', help="Mixed precision training") parser.add_argument('--amp', default=False, action='store_true', help="Mixed precision training") parser.add_argument('--loss_scale', type=float, default=0.0, help='Loss scaling, positive power of 2 values can improve fp16 convergence.') parser.add_argument('--log_freq', type=float, default=1.0, help='frequency of logging loss.') parser.add_argument('--checkpoint_activations', default=False, action='store_true', help="Whether to use gradient checkpointing") parser.add_argument("--resume_from_checkpoint", default=False, action='store_true', help="Whether to resume training from checkpoint.") parser.add_argument('--resume_step', type=int, default=-1, help="Step to resume training from.") parser.add_argument('--num_steps_per_checkpoint', type=int, default=100, help="Number of update steps until a model checkpoint is saved to disk.") parser.add_argument('--skip_checkpoint', default=False, action='store_true', help="Whether to save checkpoints") parser.add_argument('--phase2', default=False, action='store_true', help="Whether to train with seq len 512") parser.add_argument('--resume_phase2', default=False, action='store_true', help="Whether to resume training with seq len 512") parser.add_argument('--allreduce_post_accumulation', default=False, action='store_true', help="Whether to do allreduces during gradient accumulation steps.") parser.add_argument('--allreduce_post_accumulation_fp16', default=False, action='store_true', help="Whether to do fp16 allreduce post accumulation.") parser.add_argument('--phase1_end_step', type=int, default=7038, help="Number of training steps in Phase1 - seq len 128") parser.add_argument('--init_loss_scale', type=int, default=2**20, help="Initial loss scaler value") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument('--json-summary', type=str, default="results/dllogger.json", help='If provided, the json summary will be written to' 'the specified file.') parser.add_argument("--use_env", action='store_true', help="Whether to read local rank from ENVVAR") parser.add_argument('--disable_progress_bar', default=False, action='store_true', help='Disable tqdm progress bar') parser.add_argument('--steps_this_run', type=int, default=-1, help='If provided, only run this many steps before exiting') parser.add_argument("--profile", default=False, action='store_true', help="Whether to profile model.") parser.add_argument("--profile-start", default=0, type=int, help="Delay profiling to start step.") parser.add_argument('--num_workers', type=int, default=4, help='number of DataLoader worker processes per rank') # optimizations controlled by command line arguments parser.add_argument("--no_dense_sequence_output", default=False, action='store_true', help="Disable dense sequence output") parser.add_argument("--disable_jit_fusions", default=False, action='store_true', help="Disable jit fusions.") parser.add_argument("--cuda_graphs", default=False, action='store_true', help="Enable Cuda Graphs.") args 
= parser.parse_args() args.fp16 = args.fp16 or args.amp if args.steps_this_run < 0: args.steps_this_run = args.max_steps return args def setup_training(args): assert (torch.cuda.is_available()) if args.local_rank == -1: device = torch.device("cuda", 0) args.n_gpu = 1 # torch.cuda.device_count() args.allreduce_post_accumulation = False args.allreduce_post_accumulation_fp16 = False else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if args.cuda_graphs : os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0" torch.distributed.init_process_group(backend='nccl', init_method='env://') args.n_gpu = 1 if is_main_process(): dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.json_summary), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) else: dllogger.init(backends=[]) dllogger.metadata("e2e_train_time", {"unit": "s"}) dllogger.metadata("training_sequences_per_second", {"unit": "sequences/s"}) dllogger.metadata("final_loss", {"unit": None}) dllogger.metadata("raw_train_time", {"unit": "s"}) print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, args.n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) if args.train_batch_size % args.gradient_accumulation_steps != 0: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format( args.gradient_accumulation_steps, args.train_batch_size)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps if not args.do_train: raise ValueError(" `do_train` must be True.") if not args.resume_from_checkpoint and os.path.exists(args.output_dir) and ( os.listdir(args.output_dir) and any([i.startswith('ckpt') for i in os.listdir(args.output_dir)])): raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if (not args.resume_from_checkpoint or not os.path.exists(args.output_dir)) and is_main_process(): os.makedirs(args.output_dir, exist_ok=True) return device, args def prepare_model_and_optimizer(args, device, sequence_output_is_dense): # Prepare model config = modeling.BertConfig.from_json_file(args.config_file) # Padding for divisibility by 8 if config.vocab_size % 8 != 0: config.vocab_size += 8 - (config.vocab_size % 8) model = modeling.BertForPreTraining(config, sequence_output_is_dense=sequence_output_is_dense) checkpoint = None if not args.resume_from_checkpoint: global_step = 0 else: if args.resume_step == -1 and not args.init_checkpoint: model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")] args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip()) for x in model_names]) global_step = args.resume_step if not args.init_checkpoint else 0 if not args.init_checkpoint: checkpoint = torch.load(os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)), map_location=device) else: checkpoint = torch.load(args.init_checkpoint, map_location=device) model.load_state_dict(checkpoint['model'], strict=False) if args.phase2 and not args.init_checkpoint: global_step -= args.phase1_end_step if args.init_checkpoint: args.resume_step = 0 if is_main_process(): print("resume step from ", args.resume_step) model.to(device) # If 
allreduce_post_accumulation_fp16 is not set, Native AMP Autocast is # used along with FP32 gradient accumulation and all-reduce if args.fp16 and args.allreduce_post_accumulation_fp16: model.half() if not args.disable_jit_fusions : model = torch.jit.script(model) param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] optimizer = FusedLAMBAMP(optimizer_grouped_parameters, lr=args.learning_rate) lr_scheduler = PolyWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=args.max_steps, base_lr=args.learning_rate, device=device) grad_scaler = torch.cuda.amp.GradScaler(init_scale=args.init_loss_scale, enabled=args.fp16) model.checkpoint_activations(args.checkpoint_activations) if args.resume_from_checkpoint: # For phase2 from scratch, need to reset the learning rate and step count in the checkpoint. Else restore values in checkpoint. if (args.phase2 and not args.resume_phase2) or args.init_checkpoint : for group in checkpoint['optimizer']['param_groups'] : group['step'].zero_() group['lr'].fill_(args.learning_rate) else : if 'grad_scaler' in checkpoint and (not args.phase2 or args.resume_phase2): grad_scaler.load_state_dict(checkpoint['grad_scaler']) optimizer.load_state_dict(checkpoint['optimizer']) # , strict=False) if args.local_rank != -1: # Cuda Graphs requires that DDP is captured on a side stream # It is important to synchronize the streams after the DDP initialization # so anything after sees properly initialized model weights across GPUs side_stream = torch.cuda.Stream() with torch.cuda.stream(side_stream) : model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, bucket_cap_mb=torch.cuda.get_device_properties(device).total_memory, gradient_as_bucket_view=True) torch.cuda.current_stream().wait_stream(side_stream) from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook def scale_by_grad_accum_steps_wrapper(hook: Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]) -> Callable[[Any, dist.GradBucket], torch.futures.Future[torch.Tensor]]: def scale_by_grad_accum_steps_wrapper_hook( hook_state, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: bucket.set_buffer(bucket.buffer().div_(args.gradient_accumulation_steps)) fut = hook(hook_state, bucket) return fut return scale_by_grad_accum_steps_wrapper_hook # With gradient accumulation, the DDP comm hook divides the gradients by the number # gradient accumulation steps if args.gradient_accumulation_steps > 1: model.register_comm_hook(None, scale_by_grad_accum_steps_wrapper(allreduce_hook)) optimizer.setup_fp32_params() criterion = BertPretrainingCriterion(config.vocab_size, sequence_output_is_dense=sequence_output_is_dense) if (args.resume_from_checkpoint and not args.phase2) or (args.resume_phase2) or args.init_checkpoint: start_epoch = checkpoint.get('epoch', 0) else: start_epoch = 0 return model, optimizer, grad_scaler, lr_scheduler, checkpoint, global_step, criterion, start_epoch def checkpoint_step(args, epoch, global_step, model, optimizer, grad_scaler, last3_checkpoint_paths) : torch.cuda.synchronize() if is_main_process() and not args.skip_checkpoint: # Save a trained model dllogger.log(step="PARAMETER", data={"checkpoint_step": global_step}) model_to_save = 
model.module if hasattr(model, 'module') else model # Only save the model it-self if args.resume_step < 0 or not args.phase2: output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)) else: output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step + args.phase1_end_step)) if args.do_train: torch.save({'model': model_to_save.state_dict(), 'optimizer': optimizer.state_dict(), 'grad_scaler': grad_scaler.state_dict(), 'epoch': epoch}, output_save_file) # The new checkpoint could have a name already in # last3_checkpoint_paths. In this case, torch.save will overwrite # the old file; thus, we need to take the name out of # last3_checkpoint_paths and append it to the last. if output_save_file in last3_checkpoint_paths: last3_checkpoint_paths.remove(output_save_file) last3_checkpoint_paths.append(output_save_file) if len(last3_checkpoint_paths) > 3: ckpt_to_be_removed = last3_checkpoint_paths.pop(0) os.remove(ckpt_to_be_removed) def take_training_step(args, grad_scaler, model, criterion, batch, stats): with torch.cuda.amp.autocast(enabled=(args.fp16 and not args.allreduce_post_accumulation_fp16)) : prediction_scores, seq_relationship_score = model(input_ids=batch['input_ids'], token_type_ids=batch['token_type_ids'], attention_mask=batch['attention_mask'], masked_lm_labels=batch['labels']) loss = criterion(prediction_scores, seq_relationship_score, batch['labels'], batch['next_sentence_labels']) stats.device_stat('average_loss').add_(loss.detach()) grad_scaler.scale(loss).backward() def take_optimizer_step(args, lr_scheduler, optimizer, grad_scaler, device, stats): lr_scheduler.step() # learning rate warmup grad_scaler.step(optimizer) # Stats copying is located here prior to the infinity check being reset # in GradScaler::update() stats.copy_from_device() grad_scaler.update() optimizer.zero_grad(set_to_none=True) def main(): global timeout_sent args = parse_arguments() random.seed(args.seed + args.local_rank) np.random.seed(args.seed + args.local_rank) torch.manual_seed(args.seed + args.local_rank) torch.cuda.manual_seed(args.seed + args.local_rank) device, args = setup_training(args) dllogger.log(step="PARAMETER", data={"Config": [str(args)]}) # Prepare optimizer model, optimizer, grad_scaler, lr_scheduler, checkpoint, global_resume_step, criterion, epoch = prepare_model_and_optimizer(args, device, sequence_output_is_dense=not args.no_dense_sequence_output) # Prepare the data loader. 
if is_main_process(): tic = time.perf_counter() train_dataloader = lddl.torch.get_bert_pretrain_data_loader( args.input_dir, local_rank=max(args.local_rank, 0), vocab_file=args.vocab_file, data_loader_kwargs={ 'batch_size': args.train_batch_size * args.n_gpu, 'num_workers': args.num_workers, 'pin_memory': True, }, base_seed=args.seed, log_dir=None if args.output_dir is None else os.path.join(args.output_dir, 'lddl_log'), log_level=logging.WARNING, start_epoch=epoch, ) if is_main_process(): print('get_bert_pretrain_data_loader took {} s!'.format(time.perf_counter() - tic)) if is_main_process(): dllogger.log(step="PARAMETER", data={"SEED": args.seed}) dllogger.log(step="PARAMETER", data={"train_start": True}) dllogger.log(step="PARAMETER", data={"batch_size_per_gpu": args.train_batch_size}) dllogger.log(step="PARAMETER", data={"learning_rate": args.learning_rate}) model.train() most_recent_ckpts_paths = [] stats = SyncFreeStats() # Host Only Stats stats.add_stat('model_step') # Device/Host Sync-ed Stats stats.add_stat('optimizer_step', dtype=torch.int32, device_func=(lambda: optimizer.param_groups[0]['step'])) stats.add_stat('average_loss', dtype=torch.float32, device_tensor=torch.zeros(1, dtype=torch.float32, device=device)) stats.add_stat('learning_rate', dtype=torch.float32, device_func=(lambda: optimizer.param_groups[0]['lr'])) if grad_scaler.is_enabled(): # This stat only indicates a skipped step occurred. It does not accumulate the number of skipped steps stats.add_stat('skip_optimizer_step', dtype=torch.float32, device_func=(lambda: grad_scaler._found_inf_per_device(optimizer)[device])) stats.add_stat('skipped_optimizer_steps', dtype=torch.float32, device_tensor=torch.zeros(1, dtype=torch.float32, device=device), device_func=(lambda x: x.add_(grad_scaler._found_inf_per_device(optimizer)[device]))) else: stats.add_stat('skip_optimizer_step', dtype=torch.float32) stats.add_stat('skipped_optimizer_steps', dtype=torch.float32) static_gpu_batch = None full_cudagraph = None grad_accum_cudagraph = None if args.cuda_graphs: static_gpu_batch = { 'input_ids': torch.ones(args.train_batch_size, args.max_seq_length, dtype=torch.int64, device=device), 'token_type_ids': torch.ones(args.train_batch_size, args.max_seq_length, dtype=torch.int64, device=device), 'attention_mask': torch.ones(args.train_batch_size, args.max_seq_length, dtype=torch.int64, device=device), 'labels': torch.ones(args.train_batch_size, args.max_seq_length, dtype=torch.int64, device=device), 'next_sentence_labels': torch.ones(args.train_batch_size, dtype=torch.int64, device=device), } side_stream = torch.cuda.Stream() # Warmup Steps - includes jitting fusions side_stream = torch.cuda.Stream() side_stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(side_stream): for _ in range(11): take_training_step(args, grad_scaler, model, criterion, static_gpu_batch, stats) take_optimizer_step(args, lr_scheduler, optimizer, grad_scaler, device, stats) torch.cuda.current_stream().wait_stream(side_stream) # Capture Graph full_cudagraph = torch.cuda.CUDAGraph() with torch.cuda.graph(full_cudagraph): take_training_step(args, grad_scaler, model, criterion, static_gpu_batch, stats) take_optimizer_step(args, lr_scheduler, optimizer, grad_scaler, device, stats) # Warmup Steps - includes jitting fusions side_stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(side_stream): for _ in range(3): with model.no_sync(): take_training_step(args, grad_scaler, model, criterion, static_gpu_batch, stats) 
torch.cuda.current_stream().wait_stream(side_stream) # Capture Graph grad_accum_cudagraph = torch.cuda.CUDAGraph() with torch.cuda.graph(grad_accum_cudagraph): with model.no_sync(): take_training_step(args, grad_scaler, model, criterion, static_gpu_batch, stats) train_iter = tqdm( train_dataloader, desc="Iteration", disable=args.disable_progress_bar, total=len(train_dataloader), ) if is_main_process() else train_dataloader raw_train_start = None # avoid nvfuser compilation times in measuring perf with phase2 binning # ideally skip > 3 * num_bins fwd+bwd iterations to start measuring perf skip_fwd_bwd_for_perf = 4 if args.phase2: #we use 8 bins with phase2 skip_fwd_bwd_for_perf = 50 while True: for step, batch in enumerate(train_iter): # The first training step is 1 and not 0 when gradient accumulating # in order to avoid an optimizer step on the very first step stats.host_stat('model_step').add_(1) grad_accumulation_step = (stats.host_stat_value('model_step') % args.gradient_accumulation_steps) != 0 if raw_train_start is None and step == skip_fwd_bwd_for_perf: raw_train_start = time.time() # Execute Model Step if args.cuda_graphs: for k in batch.keys(): static_gpu_batch[k].copy_(batch[k], non_blocking=True) if grad_accumulation_step: grad_accum_cudagraph.replay() else: full_cudagraph.replay() else: batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()} if args.allreduce_post_accumulation and grad_accumulation_step: with model.no_sync(): take_training_step(args, grad_scaler, model, criterion, batch, stats) else: take_training_step(args, grad_scaler, model, criterion, batch, stats) if not grad_accumulation_step: take_optimizer_step(args, lr_scheduler, optimizer, grad_scaler, device, stats) # Log Optimizer Step if (not grad_accumulation_step) or timeout_sent: static_optimizer_step = stats.host_stat_value('model_step') // args.gradient_accumulation_steps dynamic_optimizer_step = static_optimizer_step - int(stats.host_stat_value('skipped_optimizer_steps')) + global_resume_step no_log_steps = static_optimizer_step % args.log_freq # Log Final Step (MAYBE) # Since the stats are asynchronously pushed from the GPU to CPU, they are not always reliable # Therefore, a synchronization is required to guarantee you see the intended value. # Without a synchronization, it is possible for some GPUs to go through the exit conditional # and others to not because they accidentally see a different value for `skipped_optimizer_steps`. # In order to remove most device syncs, synchronizations only begin in the last few steps # where the skipped step count matters. 
if static_optimizer_step + global_resume_step >= args.steps_this_run or timeout_sent: torch.cuda.synchronize() dynamic_optimizer_step = static_optimizer_step - int(stats.host_stat_value('skipped_optimizer_steps')) + global_resume_step if dynamic_optimizer_step >= args.steps_this_run or timeout_sent: train_time_raw = time.time() - raw_train_start last_num_steps = args.log_freq if no_log_steps == 0 else no_log_steps stats.device_stat('average_loss').div_(last_num_steps * args.gradient_accumulation_steps) if (torch.distributed.is_initialized()): stats.device_stat('average_loss').div_(get_world_size()) torch.distributed.all_reduce(stats.device_stat('average_loss')) # We block on this copy to insure the final value stats.host_stat('average_loss').copy_(stats.device_stat('average_loss')) if is_main_process(): dllogger.log(step=(epoch, dynamic_optimizer_step,), data={"final_loss": stats.host_stat_value('average_loss')}) checkpoint_step(args, epoch, dynamic_optimizer_step, model, optimizer, grad_scaler, most_recent_ckpts_paths) return args, train_time_raw, stats, skip_fwd_bwd_for_perf if no_log_steps == 0: if is_main_process(): dllogger.log(step=(epoch, dynamic_optimizer_step,), data={"average_loss": stats.host_stat_value('average_loss') / (args.log_freq * args.gradient_accumulation_steps), "learning_rate": stats.host_stat_value('learning_rate'), "skipped_steps": int(stats.host_stat_value('skipped_optimizer_steps'))}) if stats.host_stat_value('skip_optimizer_step') > 0.: dllogger.log(step="PARAMETER", data={"loss_scale": grad_scaler._get_scale_async().item()}) stats.device_stat('average_loss').zero_() if not args.skip_checkpoint and (dynamic_optimizer_step % args.num_steps_per_checkpoint == 0): checkpoint_step(args, epoch, dynamic_optimizer_step, model, optimizer, grad_scaler, most_recent_ckpts_paths) epoch += 1 if __name__ == "__main__": now = time.time() args, train_time_raw, stats, skip_fwd_bwd_for_perf = main() gpu_count = args.n_gpu if torch.distributed.is_initialized(): gpu_count = get_world_size() if is_main_process(): e2e_time = time.time() - now training_perf = args.train_batch_size * gpu_count * (stats.host_stat_value('model_step') - skip_fwd_bwd_for_perf) / train_time_raw dllogger.log(step=tuple(), data={"e2e_train_time": e2e_time, "training_sequences_per_second": training_perf, "final_loss": stats.host_stat_value('average_loss'), "raw_train_time": train_time_raw }) dllogger.flush()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/run_pretraining.py
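The gradient-accumulation DDP comm hook in run_pretraining.py above can be isolated into a small helper. The sketch below is an illustrative reconstruction, not part of the repository: the helper name and the accumulation count are assumptions, and it presumes a DistributedDataParallel model has already been built by the caller.
import torch
import torch.distributed as dist
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook


def make_scaled_allreduce_hook(grad_accum_steps: int):
    # Hypothetical helper mirroring scale_by_grad_accum_steps_wrapper above:
    # divide each gradient bucket by the accumulation count, then delegate to
    # the stock allreduce hook.
    def hook(state, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:
        bucket.set_buffer(bucket.buffer().div_(grad_accum_steps))
        return allreduce_hook(state, bucket)
    return hook


# Usage sketch (assumes `ddp_model` is an existing DistributedDataParallel module
# and gradient accumulation is enabled):
# ddp_model.register_comm_hook(None, make_scaled_allreduce_hook(grad_accum_steps=4))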
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed as dist from pathlib import Path def get_rank(): if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 return dist.get_rank() def get_world_size(): if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def is_main_process(): return get_rank() == 0 def barrier(): if dist.is_available() and dist.is_initialized(): dist.barrier() def format_step(step): if isinstance(step, str): return step s = "" if len(step) > 0: s += "Training Epoch: {} ".format(step[0]) if len(step) > 1: s += "Training Iteration: {} ".format(step[1]) if len(step) > 2: s += "Validation Iteration: {} ".format(step[2]) return s def mkdir(path): Path(path).mkdir(parents=True, exist_ok=True) def mkdir_by_main_process(path): if is_main_process(): mkdir(path) barrier()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/utils.py
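A minimal sketch of how the helpers in utils.py above are typically combined. The import path simply mirrors the file name, the directory name is a placeholder, and the sketch assumes torch.distributed has already been initialized by the caller.
from utils import barrier, format_step, is_main_process, mkdir_by_main_process

# Rank 0 creates the directory while the other ranks wait at the barrier inside
# mkdir_by_main_process; "results" is only an example path.
mkdir_by_main_process("results")

if is_main_process():
    # format_step turns an (epoch, iteration) tuple into a readable prefix,
    # e.g. "Training Epoch: 0 Training Iteration: 100 ".
    print(format_step((0, 100)))

# Keep all ranks in step before the next phase of work.
barrier()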
# coding=utf-8 # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Create masked LM/next sentence masked_lm TF examples for BERT.""" from __future__ import absolute_import, division, print_function, unicode_literals import argparse import logging import os import random from io import open import h5py import numpy as np from tqdm import tqdm, trange from tokenization import BertTokenizer import tokenization as tokenization import random import collections class TrainingInstance(object): """A single training instance (sentence pair).""" def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, is_random_next): self.tokens = tokens self.segment_ids = segment_ids self.is_random_next = is_random_next self.masked_lm_positions = masked_lm_positions self.masked_lm_labels = masked_lm_labels def __str__(self): s = "" s += "tokens: %s\n" % (" ".join( [tokenization.printable_text(x) for x in self.tokens])) s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) s += "is_random_next: %s\n" % self.is_random_next s += "masked_lm_positions: %s\n" % (" ".join( [str(x) for x in self.masked_lm_positions])) s += "masked_lm_labels: %s\n" % (" ".join( [tokenization.printable_text(x) for x in self.masked_lm_labels])) s += "\n" return s def __repr__(self): return self.__str__() def write_instance_to_example_file(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_file): """Create TF example files from `TrainingInstance`s.""" total_written = 0 features = collections.OrderedDict() num_instances = len(instances) features["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32") features["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32") features["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32") features["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32") features["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32") features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32") for inst_index, instance in enumerate(tqdm(instances)): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = [1] * len(input_ids) segment_ids = list(instance.segment_ids) assert len(input_ids) <= max_seq_length while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = [1.0] * len(masked_lm_ids) while len(masked_lm_positions) < max_predictions_per_seq: masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = 1 if instance.is_random_next else 0 
features["input_ids"][inst_index] = input_ids features["input_mask"][inst_index] = input_mask features["segment_ids"][inst_index] = segment_ids features["masked_lm_positions"][inst_index] = masked_lm_positions features["masked_lm_ids"][inst_index] = masked_lm_ids features["next_sentence_labels"][inst_index] = next_sentence_label total_written += 1 # if inst_index < 20: # tf.logging.info("*** Example ***") # tf.logging.info("tokens: %s" % " ".join( # [tokenization.printable_text(x) for x in instance.tokens])) # for feature_name in features.keys(): # feature = features[feature_name] # values = [] # if feature.int64_list.value: # values = feature.int64_list.value # elif feature.float_list.value: # values = feature.float_list.value # tf.logging.info( # "%s: %s" % (feature_name, " ".join([str(x) for x in values]))) print("saving data") f= h5py.File(output_file, 'w') f.create_dataset("input_ids", data=features["input_ids"], dtype='i4', compression='gzip') f.create_dataset("input_mask", data=features["input_mask"], dtype='i1', compression='gzip') f.create_dataset("segment_ids", data=features["segment_ids"], dtype='i1', compression='gzip') f.create_dataset("masked_lm_positions", data=features["masked_lm_positions"], dtype='i4', compression='gzip') f.create_dataset("masked_lm_ids", data=features["masked_lm_ids"], dtype='i4', compression='gzip') f.create_dataset("next_sentence_labels", data=features["next_sentence_labels"], dtype='i1', compression='gzip') f.flush() f.close() def create_training_instances(input_files, tokenizer, max_seq_length, dupe_factor, short_seq_prob, masked_lm_prob, max_predictions_per_seq, rng): """Create `TrainingInstance`s from raw text.""" all_documents = [[]] # Input file format: # (1) One sentence per line. These should ideally be actual sentences, not # entire paragraphs or arbitrary spans of text. (Because we use the # sentence boundaries for the "next sentence prediction" task). # (2) Blank lines between documents. Document boundaries are needed so # that the "next sentence prediction" task doesn't span between documents. for input_file in input_files: print("creating instance from {}".format(input_file)) with open(input_file, "r") as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() # Empty lines are used as document delimiters if not line: all_documents.append([]) tokens = tokenizer.tokenize(line) if tokens: all_documents[-1].append(tokens) # Remove empty documents all_documents = [x for x in all_documents if x] rng.shuffle(all_documents) vocab_words = list(tokenizer.vocab.keys()) instances = [] for _ in range(dupe_factor): for document_index in range(len(all_documents)): instances.extend( create_instances_from_document( all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) rng.shuffle(instances) return instances def create_instances_from_document( all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates `TrainingInstance`s for a single document.""" document = all_documents[document_index] # Account for [CLS], [SEP], [SEP] max_num_tokens = max_seq_length - 3 # We *usually* want to fill up the entire sequence since we are padding # to `max_seq_length` anyways, so short sequences are generally wasted # computation. 
However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pre-training and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `max_seq_length` is a hard limit. target_seq_length = max_num_tokens if rng.random() < short_seq_prob: target_seq_length = rng.randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. instances = [] current_chunk = [] current_length = 0 i = 0 while i < len(document): segment = document[i] current_chunk.append(segment) current_length += len(segment) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = rng.randint(1, len(current_chunk) - 1) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] # Random next is_random_next = False if len(current_chunk) == 1 or rng.random() < 0.5: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # This should rarely go for more than one iteration for large # corpora. However, just to be careful, we try to make sure that # the random document is not the same as the document # we're processing. for _ in range(10): random_document_index = rng.randint(0, len(all_documents) - 1) if random_document_index != document_index: break #If picked random document is the same as the current document if random_document_index == document_index: is_random_next = False random_document = all_documents[random_document_index] random_start = rng.randint(0, len(random_document) - 1) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we "put them back" so # they don't go to waste. 
num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) assert len(tokens_a) >= 1 assert len(tokens_b) >= 1 tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) (tokens, masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions( tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) instance = TrainingInstance( tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels) instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"]) def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates the predictions for the masked LM objective.""" cand_indexes = [] for (i, token) in enumerate(tokens): if token == "[CLS]" or token == "[SEP]": continue cand_indexes.append(i) rng.shuffle(cand_indexes) output_tokens = list(tokens) num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob)))) masked_lms = [] covered_indexes = set() for index in cand_indexes: if len(masked_lms) >= num_to_predict: break if index in covered_indexes: continue covered_indexes.add(index) masked_token = None # 80% of the time, replace with [MASK] if rng.random() < 0.8: masked_token = "[MASK]" else: # 10% of the time, keep original if rng.random() < 0.5: masked_token = tokens[index] # 10% of the time, replace with random word else: masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) masked_lms = sorted(masked_lms, key=lambda x: x.index) masked_lm_positions = [] masked_lm_labels = [] for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels) def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): """Truncates a pair of sequences to a maximum sequence length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b assert len(trunc_tokens) >= 1 # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if rng.random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop() def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--vocab_file", default=None, type=str, required=True, help="The vocabulary the BERT model will train on.") parser.add_argument("--input_file", default=None, type=str, required=True, help="The input train corpus. 
can be directory with .txt files or a path to a single file") parser.add_argument("--output_file", default=None, type=str, required=True, help="The output HDF5 file where the created pretraining examples will be written.") ## Other parameters # str parser.add_argument("--bert_model", default="bert-large-uncased", type=str, required=False, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") #int parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--dupe_factor", default=10, type=int, help="Number of times to duplicate the input data (with different masks).") parser.add_argument("--max_predictions_per_seq", default=20, type=int, help="Maximum number of masked LM predictions per sequence.") # floats parser.add_argument("--masked_lm_prob", default=0.15, type=float, help="Masked LM probability.") parser.add_argument("--short_seq_prob", default=0.1, type=float, help="Probability to create a sequence shorter than the maximum sequence length.") parser.add_argument("--do_lower_case", action='store_true', default=True, help="Whether to lower case the input text. True for uncased models, False for cased models.") parser.add_argument('--random_seed', type=int, default=12345, help="random seed for initialization") args = parser.parse_args() tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) input_files = [] if os.path.isfile(args.input_file): input_files.append(args.input_file) elif os.path.isdir(args.input_file): input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt') )] else: raise ValueError("{} is not a valid path".format(args.input_file)) rng = random.Random(args.random_seed) instances = create_training_instances( input_files, tokenizer, args.max_seq_length, args.dupe_factor, args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq, rng) output_file = args.output_file write_instance_to_example_file(instances, tokenizer, args.max_seq_length, args.max_predictions_per_seq, output_file) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/create_pretraining_data.py
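As a quick sanity check, the HDF5 file produced by write_instance_to_example_file above can be inspected with h5py. In the sketch below the output path is a placeholder for the --output_file argument; the dataset names are the ones created in that function.
import h5py

# "pretraining_data.hdf5" is a hypothetical path standing in for --output_file.
with h5py.File("pretraining_data.hdf5", "r") as f:
    for name in ("input_ids", "input_mask", "segment_ids",
                 "masked_lm_positions", "masked_lm_ids", "next_sentence_labels"):
        # input_ids/input_mask/segment_ids are (num_instances, max_seq_length);
        # masked_lm_* are (num_instances, max_predictions_per_seq);
        # next_sentence_labels is (num_instances,).
        print(name, f[name].shape, f[name].dtype)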
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import json import logging import os import shutil import tempfile from functools import wraps from hashlib import sha256 import sys from io import open import boto3 import requests from botocore.exceptions import ClientError from tqdm import tqdm logger = logging.getLogger(__name__) # pylint: disable=invalid-name try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"): import torch _torch_available = True # pylint: disable=invalid-name logger.info("PyTorch version {} available.".format(torch.__version__)) else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False except ImportError: _torch_available = False # pylint: disable=invalid-name try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"): import tensorflow as tf assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2 _tf_available = True # pylint: disable=invalid-name logger.info("TensorFlow version {} available.".format(tf.__version__)) else: logger.info("Disabling Tensorflow because USE_TORCH is set") _tf_available = False except ImportError: _tf_available = False # pylint: disable=invalid-name try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', Path.home() / '.pytorch_pretrained_bert')) except AttributeError: PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert')) def is_torch_available(): return _torch_available def is_tf_available(): return _tf_available def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. """ url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. 
""" if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename, cache_dir=None): """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def split_s3_path(url): """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise EnvironmentError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url): """Check ETag on S3 object.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url, temp_file): """Pull a file directly from S3.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def http_get(url, temp_file): req = requests.get(url, stream=True) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url, cache_dir=None): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. 
""" if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: response = requests.head(url, allow_redirects=True) if response.status_code != 200: raise IOError("HEAD request failed for url {} with status code {}" .format(url, response.status_code)) etag = response.headers.get("ETag") filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get_tokenization_utils(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w', encoding="utf-8") as meta_file: json.dump(meta, meta_file) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename): ''' Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. ''' collection = set() with open(filename, 'r', encoding='utf-8') as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path, dot=True, lower=True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str: """ Resolve a model identifier, and a file name, to a HF-hosted url on either S3 or Cloudfront (a Content Delivery Network, or CDN). Cloudfront is replicated over the globe so downloads are way faster for the end user (and it also lowers our bandwidth costs). However, it is more aggressively cached by default, so may not always reflect the latest changes to the underlying file (default TTL is 24 hours). In terms of client-side caching from this library, even though Cloudfront relays the ETags from S3, using one or the other (or switching from one to the other) will affect caching: cached files are not shared between the two because the cached file's name contains a hash of the url. """ S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX legacy_format = "/" not in model_id if legacy_format: return f"{endpoint}/{model_id}-{filename}" else: return f"{endpoint}/{model_id}/{filename}" def torch_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. 
@wraps(func) def wrapper(*args, **kwargs): if is_torch_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires PyTorch.") return wrapper def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https")
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/file_utils.py
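A short usage sketch for the caching helpers in file_utils.py above. The URL is purely illustrative and the import path simply mirrors the file name; this is not a prescribed workflow from the repository.
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path, url_to_filename

# A remote URL is downloaded into PYTORCH_PRETRAINED_BERT_CACHE and the local
# cache path is returned; an existing local path is returned unchanged.
# The URL below is a placeholder, not a real model artifact.
local_path = cached_path("https://example.com/bert_config.json")
print(local_path)

# The cache file name is a SHA-256 hash of the URL (plus the ETag, if any).
print(url_to_filename("https://example.com/bert_config.json", etag="abc123"))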
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BERT inference script. Does not depend on dataset. """ from __future__ import absolute_import, division, print_function import argparse import collections import json import logging import math import os import random import sys from io import open import numpy as np import torch from tqdm import tqdm, trange from types import SimpleNamespace from file_utils import PYTORCH_PRETRAINED_BERT_CACHE from modeling import BertForQuestionAnswering, BertConfig, WEIGHTS_NAME, CONFIG_NAME from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize) from run_squad import _get_best_indices, _compute_softmax, get_valid_prelim_predictions, get_answer_text if sys.version_info[0] == 2: import cPickle as pickle else: import pickle logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) import math import json import numpy as np import collections def preprocess_tokenized_text(doc_tokens, query_tokens, tokenizer, max_seq_length, max_query_length): """ converts an example into a feature """ if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # truncate if too long length = len(all_doc_tokens) length = min(length, max_tokens_for_doc) tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(length): token_to_orig_map[len(tokens)] = tok_to_orig_index[i] token_is_max_context[len(tokens)] = True tokens.append(all_doc_tokens[i]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length tensors_for_inference = { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } tensors_for_inference = SimpleNamespace(**tensors_for_inference) tokens_for_postprocessing = { 'tokens': tokens, 'token_to_orig_map': token_to_orig_map, 'token_is_max_context': token_is_max_context } tokens_for_postprocessing = SimpleNamespace(**tokens_for_postprocessing) return tensors_for_inference, tokens_for_postprocessing RawResult = collections.namedtuple("RawResult", ["start_logits", "end_logits"]) def get_answer(doc_tokens, tokens_for_postprocessing, start_logits, end_logits, args): result = RawResult(start_logits=start_logits, end_logits=end_logits) predictions = [] Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit']) if args.version_2_with_negative: null_val = (float("inf"), 0, 0) start_indices = _get_best_indices(result.start_logits, args.n_best_size) end_indices = _get_best_indices(result.end_logits, args.n_best_size) prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, tokens_for_postprocessing, result, args) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True ) if args.version_2_with_negative: score = result.start_logits[0] + result.end_logits[0] if score < null_val[0]: null_val = (score, result.start_logits[0], result.end_logits[0]) doc_tokens_obj = { 'doc_tokens': doc_tokens, } doc_tokens_obj = SimpleNamespace(**doc_tokens_obj) curr_predictions = [] seen_predictions = [] for pred in prelim_predictions: if len(curr_predictions) == args.n_best_size: break if pred.end_index > 0: # this is a non-null prediction final_text = get_answer_text(doc_tokens_obj, tokens_for_postprocessing, pred, args) if final_text in seen_predictions: continue else: final_text = "" seen_predictions.append(final_text) curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit)) predictions += curr_predictions # add empty prediction if args.version_2_with_negative: predictions.append(Prediction('', null_val[1], null_val[2])) nbest_answers = [] answer = None nbest = sorted(predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)[:args.n_best_size] total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry and entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_answers.append(output) if args.version_2_with_negative: score_diff = null_val[0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit if score_diff > args.null_score_diff_threshold: answer = "" else: answer = best_non_null_entry.text else: answer = nbest_answers[0]['text'] return answer, nbest_answers def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, 
bert-base-chinese.") parser.add_argument("--init_checkpoint", default=None, type=str, required=True, help="The checkpoint file from pretraining") ## Other parameters parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. ") parser.add_argument("--seed", default=1, type=int) parser.add_argument("--question", default="Most antibiotics target bacteria and don't affect what class of organisms? ", type=str, help="question") parser.add_argument("--context", default="Within the genitourinary and gastrointestinal tracts, commensal flora serve as biological barriers by competing with pathogenic bacteria for food and space and, in some cases, by changing the conditions in their environment, such as pH or available iron. This reduces the probability that pathogens will reach sufficient numbers to cause illness. However, since most antibiotics non-specifically target bacteria and do not affect fungi, oral antibiotics can lead to an overgrowth of fungi and cause conditions such as a vaginal candidiasis (a yeast infection). There is good evidence that re-introduction of probiotic flora, such as pure cultures of the lactobacilli normally found in unpasteurized yogurt, helps restore a healthy balance of microbial populations in intestinal infections in children and encouraging preliminary data in studies on bacterial gastroenteritis, inflammatory bowel diseases, urinary tract infection and post-surgical infections. ", type=str, help="context") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--n_best_size", default=1, type=int, help="The total number of n-best predictions to generate. ") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--do_lower_case", action='store_true', help="Whether to lower case the input text. True for uncased models, False for cased models.") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, then the model can reply with "unknown". ') parser.add_argument('--null_score_diff_threshold', type=float, default=-11.0, help="If null_score - best_non_null is greater than the threshold predict 'unknown'. 
") parser.add_argument('--vocab_file', type=str, default=None, required=True, help="Vocabulary mapping/file BERT was pretrainined on") parser.add_argument("--config_file", default=None, type=str, required=True, help="The BERT model config") parser.add_argument('--fp16', action='store_true', help="use mixed-precision") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1), help="local_rank for distributed training on gpus") args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large # Prepare model config = BertConfig.from_json_file(args.config_file) # Padding for divisibility by 8 if config.vocab_size % 8 != 0: config.vocab_size += 8 - (config.vocab_size % 8) # initialize model model = BertForQuestionAnswering(config) model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu')["model"]) model.to(device) if args.fp16: model.half() model.eval() print("question: ", args.question) print("context: ", args.context) print() # preprocessing doc_tokens = args.context.split() query_tokens = tokenizer.tokenize(args.question) feature = preprocess_tokenized_text(doc_tokens, query_tokens, tokenizer, max_seq_length=args.max_seq_length, max_query_length=args.max_query_length) tensors_for_inference, tokens_for_postprocessing = feature input_ids = torch.tensor(tensors_for_inference.input_ids, dtype=torch.long).unsqueeze(0) segment_ids = torch.tensor(tensors_for_inference.segment_ids, dtype=torch.long).unsqueeze(0) input_mask = torch.tensor(tensors_for_inference.input_mask, dtype=torch.long).unsqueeze(0) # load tensors to device input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) # run prediction with torch.no_grad(): start_logits, end_logits = model(input_ids, segment_ids, input_mask) # post-processing start_logits = start_logits[0].detach().cpu().tolist() end_logits = end_logits[0].detach().cpu().tolist() answer, answers = get_answer(doc_tokens, tokens_for_postprocessing, start_logits, end_logits, args) # print result print() print(answer) print() print(json.dumps(answers, indent=4)) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/inference.py
# coding=utf-8 # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" from __future__ import absolute_import, division, print_function, unicode_literals import copy import json import logging import math import os import shutil import tarfile import tempfile import sys from io import open from typing import Final import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils import checkpoint sys.path.append('/workspace/bert/') from file_utils import cached_path from torch.nn import Module from torch.nn.parameter import Parameter import torch.nn.functional as F import torch.nn.init as init from collections import OrderedDict logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", } CONFIG_NAME = 'config.json' WEIGHTS_NAME = 'pytorch_model.bin' TF_WEIGHTS_NAME = 'model.ckpt' def load_tf_weights_in_bert(model, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.ascontiguousarray(np.transpose(array)) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def gelu(x): return torch.nn.functional.gelu(x, approximate=True) def swish(x): return x * torch.sigmoid(x) #torch.nn.functional.gelu(x) # Breaks ONNX export ACT2FN = {"gelu": gelu, "tanh": torch.tanh, "relu": torch.nn.functional.relu, "swish": swish} class LinearActivation(Module): r"""Fused Linear and activation Module. """ __constants__ = ['bias'] def __init__(self, in_features, out_features, act='gelu', bias=True): super(LinearActivation, self).__init__() self.in_features = in_features self.out_features = out_features self.bias = None assert act in ACT2FN, "Activation function is not found in activation dictionary." self.act_fn = ACT2FN[act] self.weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def forward(self, input): #if not self.bias is None: # return self.biased_act_fn(self.bias, F.linear(input, self.weight, None)) #else: return self.act_fn(F.linear(input, self.weight, self.bias)) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format( self.in_features, self.out_features, self.bias is not None ) class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, output_all_encoded_layers=False): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. 
hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. """ if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.output_all_encoded_layers = output_all_encoded_layers else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string()) class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" distillation : Final[bool] def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.distillation = getattr(config, 'distillation', False) if self.distillation: self.distill_state_dict = OrderedDict() self.distill_config = config.distillation_config else : self.distill_config = {'use_embedding_states' : False } def forward(self, input_ids, token_type_ids): seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) if self.distillation: if self.distill_config["use_embedding_states"]: self.distill_state_dict["embedding_states"] = embeddings return embeddings class BertSelfAttention(nn.Module): distillation : Final[bool] def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) #Distillation specific self.distillation = getattr(config, 'distillation', False) if self.distillation: self.distill_state_dict = OrderedDict() self.distill_config = config.distillation_config else : self.distill_config = { 'use_attention_scores' : False, 'use_value_states' : False } def transpose_for_scores(self, x): # seq: x.size(0), bsz: x.size(0) x = x.view(x.size(0), x.size(1) * self.num_attention_heads, self.attention_head_size).transpose(0, 1) return x def transpose_key_for_scores(self, x): # seq: x.size(0), bsz: x.size(0) x = x.view(x.size(0), x.size(1) * self.num_attention_heads, self.attention_head_size).permute(1, 2, 0) return x def forward(self, hidden_states, attention_mask): # (seq, bsz, hidden) batch_size = hidden_states.size(1) seq_length = hidden_states.size(0) mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.bmm(query_layer, key_layer) # (bsz, heads, seq, seq) attention_scores = attention_scores.view(batch_size, self.num_attention_heads, seq_length, seq_length) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = F.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. # (bsz, heads, seq, seq) attention_probs = self.dropout(attention_probs) attention_probs = attention_probs.view(batch_size * self.num_attention_heads, seq_length, seq_length) context_layer = torch.bmm(attention_probs, value_layer) context_layer = context_layer.transpose(0, 1).contiguous() # (seq, bsz, hidden) context_layer = context_layer.view(seq_length, batch_size, self.all_head_size) #Cache states if running distillation if self.distillation: if self.distill_config["use_attention_scores"]: self.distill_state_dict["attention_scores"] = attention_scores if self.distill_config["use_value_states"]: self.distill_state_dict["value_states"] = context_layer return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): distillation : Final[bool] def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) #Distillation specific self.distillation = getattr(config, 'distillation', False) if self.distillation: self.distill_state_dict = OrderedDict() self.distill_config = config.distillation_config else : self.distill_config = {'use_hidden_states' : False} def forward(self, hidden_states, attention_mask): attention_output = 
self.attention(hidden_states, attention_mask) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) #Cache states if running distillation if self.distillation: if self.distill_config["use_hidden_states"]: self.distill_state_dict["hidden_states"] = layer_output return layer_output class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.output_all_encoded_layers = config.output_all_encoded_layers self._checkpoint_activations = False @torch.jit.unused def checkpointed_forward(self, hidden_states, attention_mask): def custom(start, end): def custom_forward(*inputs): layers = self.layer[start:end] x_ = inputs[0] for layer in layers: x_ = layer(x_, inputs[1]) return x_ return custom_forward l = 0 num_layers = len(self.layer) chunk_length = math.ceil(math.sqrt(num_layers)) while l < num_layers: hidden_states = checkpoint.checkpoint(custom(l, l+chunk_length), hidden_states, attention_mask*1) l += chunk_length return hidden_states def forward(self, hidden_states, attention_mask): all_encoder_layers = [] if self._checkpoint_activations: hidden_states = self.checkpointed_forward(hidden_states, attention_mask) else: # (bsz, seq, hidden) => (seq, bsz, hidden) hidden_states = hidden_states.transpose(0, 1) for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states, attention_mask) if self.output_all_encoded_layers: all_encoder_layers.append(hidden_states) # The hidden states need to be contiguous at this point to enable # dense_sequence_output # (seq, bsz, hidden) => (bsz, seq, hidden) hidden_states = hidden_states.transpose(0, 1).contiguous() if not self.output_all_encoded_layers or self._checkpoint_activations: all_encoder_layers.append(hidden_states) return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh") def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense_act(first_token_tensor) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
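        # (added note) bert_model_embedding_weights has shape
        # (vocab_size, hidden_size): the decoder below maps hidden_size back to
        # vocab_size and ties its weight to the word-embedding matrix, so the
        # bias defined afterwards is the only new parameter in this head.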
self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): sequence_output_is_dense: Final[bool] def __init__(self, config, bert_model_embedding_weights, sequence_output_is_dense=False): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, 2) self.sequence_output_is_dense = sequence_output_is_dense def forward(self, sequence_output, pooled_output, masked_lm_labels): if self.sequence_output_is_dense: # We are masking out elements that won't contribute to loss because of masked lm labels sequence_flattened = torch.index_select(sequence_output.view(-1,sequence_output.shape[-1]), 0, torch.nonzero(masked_lm_labels.view(-1) != -1).squeeze()) prediction_scores = self.predictions(sequence_flattened) else: prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(BertPreTrainedModel, self).__init__() if not isinstance(config, BertConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `BertConfig`. " "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) self.config = config def init_bert_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @torch.jit.ignore def checkpoint_activations(self, val): def _apply_flag(module): if hasattr(module, "_checkpoint_activations"): module._checkpoint_activations=val self.apply(_apply_flag) def enable_apex(self, val): def _apply_flag(module): if hasattr(module, "apex_enabled"): module.apex_enabled=val self.apply(_apply_flag) @classmethod def from_scratch(cls, pretrained_model_name_or_path, distill_config=None, pooler=True, *inputs, **kwargs): resolved_config_file = os.path.join( pretrained_model_name_or_path, CONFIG_NAME) config = BertConfig.from_json_file(resolved_config_file) #Load distillation specific config if distill_config: distill_config = json.load(open(distill_config, "r")) distill_config["distillation_config"]["use_pooler"] = pooler config.__dict__.update(distill_config) logger.info("Model config {}".format(config)) model = cls(config, *inputs, **kwargs) return model, config @classmethod def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, distill_config=None, pooler=True, *inputs, **kwargs): """ Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = pretrained_model_name_or_path # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). 
" "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format( archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file) or from_tf: serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir # Load config config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) #Load distillation specific config if distill_config: distill_config = json.load(open(distill_config, "r")) distill_config["distillation_config"]["use_pooler"] = pooler config.__dict__.update(distill_config) logger.info("Model config {}".format(config)) # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path, map_location='cpu') if tempdir: # Clean up temp dir shutil.rmtree(tempdir) if from_tf: # Directly load from a TensorFlow checkpoint weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) return load_tf_weights_in_bert(model, weights_path) # Load from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if 'intermediate.dense.' in key: new_key = key.replace('intermediate.dense.', 'intermediate.dense_act.') if 'pooler.dense.' in key: new_key = key.replace('pooler.dense.', 'pooler.dense_act.') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): start_prefix = 'bert.' load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) return model, config class BertModel(BertPreTrainedModel): """BERT model ("Bidirectional Embedding Representations from a Transformer"). 
Params: config: a BertConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. Outputs: Tuple of (encoded_layers, pooled_output) `encoded_layers`: controled by `output_all_encoded_layers` argument: - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLS`) to train on the Next-Sentence task (see BERT's paper). Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = modeling.BertModel(config=config) all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) ``` """ distillation : Final[bool] teacher : Final[bool] def __init__(self, config): super(BertModel, self).__init__(config) # Distillation specific self.distillation = getattr(config, 'distillation', False) if self.distillation: self.distill_state_dict = OrderedDict() self.distill_config = config.distillation_config else : self.distill_config = {'use_pooler' : False, 'use_pred_states' : False} self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) # Use pooler if not running distillation or distill_config["use_pooler"] is set to True if not self.distillation or (self.distill_config["use_pooler"] and self.distill_config["use_pred_states"]): self.pooler = BertPooler(config) self.apply(self.init_bert_weights) self.output_all_encoded_layers = config.output_all_encoded_layers self.teacher = False def forward(self, input_ids, token_type_ids, attention_mask): # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, token_type_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask) sequence_output = encoded_layers[-1] # Use pooler if not running distillation or distill_config["use_pooler"] is set to True if not self.distillation or (self.distill_config["use_pooler"] and self.distill_config["use_pred_states"]): pooled_output = self.pooler(sequence_output) else: pooled_output = None if not self.output_all_encoded_layers: encoded_layers = encoded_layers[-1:] if not self.teacher: return encoded_layers, pooled_output def make_teacher(self, ): self.teacher = True class BertForPreTraining(BertPreTrainedModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ distillation : Final[bool] def __init__(self, config, sequence_output_is_dense=False): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.distillation = getattr(config, 'distillation', False) if not self.distillation: self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight, sequence_output_is_dense) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids, attention_mask, masked_lm_labels): # if self.distillation: # self.bert(input_ids, token_type_ids, attention_mask) # else: encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask) if not self.distillation: sequence_output = encoded_layers[-1] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output, masked_lm_labels) return prediction_scores, seq_relationship_score class BertForMaskedLM(BertPreTrainedModel): """BERT model with the masked language modeling head. This module comprises the BERT model followed by the masked language modeling head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] Outputs: if `masked_lm_labels` is not `None`: Outputs the masked language modeling loss. if `masked_lm_labels` is `None`: Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForMaskedLM(config) masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None): encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) sequence_output = encoded_layers[-1] prediction_scores = self.cls(sequence_output) if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) return masked_lm_loss else: return prediction_scores class BertForNextSentencePrediction(BertPreTrainedModel): """BERT model with next sentence prediction head. This module comprises the BERT model followed by the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `next_sentence_label` is not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `next_sentence_label` is `None`: Outputs the next sentence classification logits of shape [batch_size, 2]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForNextSentencePrediction(config) seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask) seq_relationship_score = self.cls( pooled_output) if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) return next_sentence_loss else: return seq_relationship_score class BertForSequenceClassification(BertPreTrainedModel): """BERT model for classification. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForSequenceClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ distillation : Final[bool] def __init__(self, config, num_labels): super(BertForSequenceClassification, self).__init__(config) #Distillation specific self.distillation = getattr(config, 'distillation', False) if self.distillation: self.distill_state_dict = OrderedDict() self.distill_config = config.distillation_config self.num_labels = num_labels self.bert = BertModel(config) if not self.distillation or self.distill_config["use_pred_states"]: self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask) if not self.distillation or self.distill_config["use_pred_states"]: pooled_output = self.dropout(pooled_output) #pooled_output = torch.relu(pooled_output) final_output = self.classifier(pooled_output) if self.distillation: if self.distill_config["use_pred_states"]: self.distill_state_dict["pred_states"] = final_output if not self.distillation or not self.training: return final_output class BertForMultipleChoice(BertPreTrainedModel): """BERT model for multiple choice tasks. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_choices`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_choices]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_choices = 2 model = BertForMultipleChoice(config, num_choices) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_choices): super(BertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return loss else: return reshaped_logits class BertForTokenClassification(BertPreTrainedModel): """BERT model for token-level classification. This module is composed of the BERT model with a linear layer on top of the full hidden state of the last layer. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForTokenClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels): super(BertForTokenClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) sequence_output = encoded_layers[-1] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForQuestionAnswering(BertPreTrainedModel): """BERT model for Question Answering (span extraction). This module is composed of the BERT model with a linear layer on top of the sequence output that computes start_logits and end_logits Params: `config`: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. Outputs: Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end position tokens of shape [batch_size, sequence_length]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForQuestionAnswering(config) start_logits, end_logits = model(input_ids, token_type_ids, input_mask) ``` """ distillation : Final[bool] def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) #Distillation specific self.distillation = getattr(config, 'distillation', False) if self.distillation: self.distill_state_dict = OrderedDict() self.distill_config = config.distillation_config else : self.distill_config = {'use_pred_states' : False } self.bert = BertModel(config) # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version # self.dropout = nn.Dropout(config.hidden_dropout_prob) if not self.distillation or self.distill_config["use_pred_states"]: self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids, attention_mask): encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) if not self.distillation or self.distill_config["use_pred_states"]: sequence_output = encoded_layers[-1] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if self.distillation: if self.distill_config["use_pred_states"]: self.distill_state_dict["pred_states"] = [start_logits, end_logits] if not self.distillation or not self.training: return start_logits, end_logits class Project(Module): """ nn module to project student layers to a specific size """ def __init__(self, student_config, teacher_config): super(Project, self).__init__() self.student_config = student_config self.teacher_config = teacher_config self.fit_dense = nn.Linear(self.student_config.hidden_size, self.teacher_config.hidden_size) self.apply(self.init_weights) def forward(self, student_tensor_list): """ student_tensor : [List] of tensors to be projects """ projected = [] for student_tensor in student_tensor_list: projected.append(self.fit_dense(student_tensor)) return projected def init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.student_config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" f = open(json_file, "r", encoding='utf-8') return json.load(f)
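# ---------------------------------------------------------------------------
# Minimal smoke-test sketch (an addition for illustration, not part of the
# original module). It mirrors the "Example usage" docstrings above with a
# deliberately tiny configuration; the sizes and the __main__ guard are
# arbitrary choices made here, not values used elsewhere in the repo.
if __name__ == "__main__":
    # Tiny config: hidden_size must be divisible by num_attention_heads.
    _config = BertConfig(vocab_size_or_config_json_file=1000,
                         hidden_size=64,
                         num_hidden_layers=2,
                         num_attention_heads=4,
                         intermediate_size=128)
    _model = BertForQuestionAnswering(_config)
    _model.eval()
    # Already-converted WordPiece token ids, as in the docstring examples.
    _input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    _input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    _token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    with torch.no_grad():
        _start_logits, _end_logits = _model(_input_ids, _token_type_ids, _input_mask)
    # Both outputs have shape [batch_size, sequence_length] == [2, 3].
    print(_start_logits.shape, _end_logits.shape)
# ---------------------------------------------------------------------------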
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/modeling.py
# coding=utf-8 # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" import argparse import csv import logging import os import random import sys from io import open import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from file_utils import PYTORCH_PRETRAINED_BERT_CACHE from modeling import BertForMultipleChoice, BertConfig, WEIGHTS_NAME, CONFIG_NAME from optimization import BertAdam, warmup_linear from tokenization import BertTokenizer torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) class SwagExample(object): """A single training/test example for the SWAG dataset.""" def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label = None): self.swag_id = swag_id self.context_sentence = context_sentence self.start_ending = start_ending self.endings = [ ending_0, ending_1, ending_2, ending_3, ] self.label = label def __str__(self): return self.__repr__() def __repr__(self): l = [ "swag_id: {}".format(self.swag_id), "context_sentence: {}".format(self.context_sentence), "start_ending: {}".format(self.start_ending), "ending_0: {}".format(self.endings[0]), "ending_1: {}".format(self.endings[1]), "ending_2: {}".format(self.endings[2]), "ending_3: {}".format(self.endings[3]), ] if self.label is not None: l.append("label: {}".format(self.label)) return ", ".join(l) class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for _, input_ids, input_mask, segment_ids in choices_features ] self.label = label def read_swag_examples(input_file, is_training): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) if is_training and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ SwagExample( swag_id = line[2], context_sentence = line[4], start_ending = line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
ending_0 = line[7], ending_1 = line[8], ending_2 = line[9], ending_3 = line[10], label = int(line[11]) if is_training else None ) for line in lines[1:] # we skip the line with the column names ] return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # Swag is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given Swag example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in enumerate(examples): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("swag_id: {}".format(example.swag_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.swag_id, choices_features = choices_features, label = label ) ) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .csv files (or other data files) for the task.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, bert-base-chinese.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints will be written.") parser.add_argument("--init_checkpoint", default=None, type=str, required=True, help="The checkpoint file from pretraining") ## Other parameters parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1.0, type=float, help="Total number of training steps to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1), help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', default=False, action='store_true', help="Mixed precision training") parser.add_argument('--amp', default=False, action='store_true', help="Mixed precision training") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") args = parser.parse_args() args.fp16 = args.fp16 or args.amp if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir): print("WARNING: Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) train_examples = None num_train_optimization_steps = None if args.do_train: train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True) num_train_optimization_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare model model = BertForMultipleChoice.from_pretrained(args.bert_model, cache_dir=os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank)), num_choices=4) checkpoint = torch.load(args.init_checkpoint, map_location='cpu') checkpoint = checkpoint["model"] if "model" in checkpoint.keys() else checkpoint model.load_state_dict(checkpoint, strict=False) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) # hack to remove pooler, which is not used # thus it produce None grad that break apex param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: from apex.contrib.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = 
FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 if args.do_train: train_features = convert_examples_to_features( train_examples, tokenizer, args.max_seq_length, True) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): # Terminate early for benchmarking if args.max_steps > 0 and global_step > args.max_steps: break batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch loss = model(input_ids, segment_ids, input_mask, label_ids) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. 
if args.fp16 and args.loss_scale != 1.0: # rescale loss for fp16 training # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html loss = loss * args.loss_scale if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if args.fp16: optimizer.backward(loss) else: loss.backward() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: # modify learning rate with special warm up BERT uses # if args.fp16 is False, BertAdam is used that handles this automatically lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 if args.do_train: # Save a trained model and the associated configuration model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) torch.save(model_to_save.state_dict(), output_model_file) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) with open(output_config_file, 'w') as f: f.write(model_to_save.config.to_json_string()) # Load a trained model and config that you have fine-tuned config = BertConfig(output_config_file) model = BertForMultipleChoice(config, num_choices=4) model.load_state_dict(torch.load(output_model_file)) else: model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4) model.load_state_dict(checkpoint, strict=False) model.to(device) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True) eval_features = convert_examples_to_features( eval_examples, tokenizer, args.max_seq_length, True) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids) logits = model(input_ids, segment_ids, input_mask) logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': 
eval_accuracy, 'global_step': global_step, 'loss': tr_loss / nb_tr_steps if args.do_train else None} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/run_swag.py
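The comments in `convert_examples_to_features` above explain how a SWAG example becomes four `[CLS] context [SEP] choice [SEP]` sequences whose scores are compared with a softmax. A minimal sketch of that formatting follows (not from the repository); it uses a whitespace split in place of WordPiece and dummy sentences, but mirrors the same truncation heuristic and segment-id layout.

```python
# Illustrative sketch: build the four "[CLS] context [SEP] choice [SEP]" inputs
# described above, using a toy whitespace tokenizer instead of BertTokenizer.
context = "A woman walks onto the stage".split()
start_ending = "She".split()
endings = ["starts to sing", "falls asleep", "leaves the room", "waves at the crowd"]

max_seq_length = 16
for ending in endings:
    ending_tokens = start_ending + ending.split()
    context_tokens = context[:]
    # Same heuristic as _truncate_seq_pair: pop from the longer side until the
    # pair fits, accounting for [CLS] and two [SEP] tokens ("- 3").
    while len(context_tokens) + len(ending_tokens) > max_seq_length - 3:
        (context_tokens if len(context_tokens) > len(ending_tokens) else ending_tokens).pop()
    tokens = ["[CLS]"] + context_tokens + ["[SEP]"] + ending_tokens + ["[SEP]"]
    segment_ids = [0] * (len(context_tokens) + 2) + [1] * (len(ending_tokens) + 1)
    print(tokens, segment_ids)
```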
# Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import torch
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler


class LRScheduler(_LRScheduler):
    def __init__(self, optimizer, last_epoch=-1):
        # Check if using mixed precision training
        self.mixed_training = False
        base_optimizer = optimizer

        # Check that optimizer param is valid
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))

        super(LRScheduler, self).__init__(base_optimizer, last_epoch)

    def step(self, epoch=None):
        # Set the current training step
        # ('epoch' is used to be consistent with _LRScheduler)
        if self.mixed_training:
            # The assumption is that the step will be constant
            state_dict = self.optimizer.state[self.optimizer.param_groups[0]['params'][0]]
            if 'step' in state_dict:
                self.last_epoch = state_dict['step'] + 1
            else:
                self.last_epoch = 1
        else:
            self.last_epoch = epoch if epoch is not None else self.last_epoch + 1

        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr


class ConstantLR(LRScheduler):
    def get_lr(self):
        return self.base_lrs


class CosineWarmUpScheduler(LRScheduler):
    """
    Applies a warm up period to the learning rate.
    """

    def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
        self.warmup = warmup
        self.total_steps = total_steps
        super(CosineWarmUpScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        progress = self.last_epoch / self.total_steps
        if progress < self.warmup:
            return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
        else:
            # Cosine annealing after warmup. `progress` is a Python float, so use
            # math.cos(math.pi * progress) rather than torch.cos(math.pi + progress).
            return [base_lr * (0.5 * (1.0 + math.cos(math.pi * progress))) for base_lr in self.base_lrs]


class ConstantWarmUpScheduler(LRScheduler):
    """
    Applies a warm up period to the learning rate.
    """

    def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
        self.warmup = warmup
        self.total_steps = total_steps
        super(ConstantWarmUpScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        progress = self.last_epoch / self.total_steps
        if progress < self.warmup:
            return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
        else:
            return self.base_lrs


class LinearWarmUpScheduler(LRScheduler):
    """
    Applies a warm up period to the learning rate.
    """

    def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
        self.warmup = warmup
        self.total_steps = total_steps
        super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        progress = self.last_epoch / self.total_steps
        if progress < self.warmup:
            return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
        else:
            return [base_lr * max((progress - 1.0) / (self.warmup - 1.0), 0.) for base_lr in self.base_lrs]


class PolyWarmUpScheduler(LRScheduler):
    """
    Applies a warm up period to the learning rate.
    """

    def __init__(self, optimizer, warmup, total_steps, degree=0.5, last_epoch=-1, base_lr=1., device='cpu'):
        self.warmup = torch.tensor(warmup, device=device)
        self.total_steps = torch.tensor(total_steps, device=device)
        self.degree = torch.tensor(degree, device=device)
        device_last_epoch = torch.tensor(last_epoch, device=device)
        self.base_lr = torch.tensor(base_lr, device=device)
        self.device = device
        super(PolyWarmUpScheduler, self).__init__(optimizer, device_last_epoch)

    def step(self, epoch=None):
        param_group = self.optimizer.param_groups[0]
        if 'step' in param_group:
            self.last_epoch = param_group['step'] + 1
        else:
            self.last_epoch = torch.tensor(1., device=self.device)

        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

    def get_lr(self):
        progress = self.last_epoch / self.total_steps
        lr_tensor = torch.where(progress < self.warmup,
                                self.base_lr * progress / self.warmup,
                                self.base_lr * ((1.0 - progress) ** self.degree))
        return [lr_tensor for _ in range(len(self.optimizer.param_groups))]
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/schedulers.py
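The warmup schedulers above all follow the same piecewise shape: a linear ramp for the first `warmup` fraction of steps, then a decay. A minimal sketch of the `LinearWarmUpScheduler` shape, computed directly without an optimizer (not from the repository; the base learning rate and step count are illustration values):

```python
# Illustrative sketch: the learning-rate curve produced by LinearWarmUpScheduler,
# evaluated as a plain function of the training step.
base_lr, warmup, total_steps = 5e-5, 0.1, 100

def linear_warmup_lr(step):
    progress = step / total_steps
    if progress < warmup:
        return base_lr * progress / warmup          # linear ramp up
    return base_lr * max((progress - 1.0) / (warmup - 1.0), 0.0)   # linear decay to zero

for step in (0, 5, 10, 50, 100):
    print(step, linear_warmup_lr(step))
```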
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import OrderedDict


class DistillHooks():
    """Implements hooks that can extract any intermediate
    output/state in a model's forward pass for distillation.
    """

    def __init__(self, config):
        """
        Intermediate states extracted by `self.child_to_main_hook`
        are saved in `module.distill_states_dict`

        Intermediate nn.Module states extracted by `self.nn_module_hook`
        as listed in `self.nn_module_names` are saved in `self.nn_module_states`
        """

        # List of nn_module_names to register extraction hooks on in `self.register_nn_module_hook`
        self.nn_module_names = config["nn_module_names"]
        # Dictionary to store states extracted from nn modules using `self.nn_module_hook`
        self.nn_module_states = {}

    def nn_module_hook(self, name):
        """
        Method to cache output on nn.Module(s)
        """
        def hook(module, input, output):
            self.nn_module_states[name] = output
        return hook

    def register_nn_module_hook(self, module, input):
        """
        Method to register a hook on an nn.Module directly. With this method, the user can
        obtain output from an nn.Module without having to explicitly add lines to cache state
        in the nn.Module itself, or when the user has no access to the module's `forward`
        method. Example: models from torchvision.

        Typically used in models where the model definition or the forward pass is
        inaccessible. Intermediate states will be stored in `self.nn_module_states`
        with the key being the name of the module.

        The hook has to be deleted after the very first forward pass to avoid registering
        `nn_module_hook` on modules listed in `self.nn_module_names` with every forward pass.

        Example:
        model = MyModel()
        distill_hooks = DistillHooks(config)
        model_pre_hook = model.register_forward_pre_hook(distill_hooks.register_nn_module_hook)
        for idx, batch in enumerate(train_dataloader):
            if idx == 1:
                model_pre_hook.remove()
        """
        for name, i in module.named_modules():
            if name in self.nn_module_names:
                i.register_forward_hook(self.nn_module_hook(name))
                print("registered `nn_module_hook` on", name)

    def child_to_main_hook(self, module, input, output):
        """
        Method to recursively fetch all intermediate states cached in children modules
        and store them in the parent module
        """
        module.distill_states_dict = OrderedDict()
        for name, i in module.named_modules():
            if hasattr(i, 'distill_state_dict'):
                module.distill_states_dict[name] = i.distill_state_dict


def flatten_states(state_dict, state_name):
    """
    Method to iterate across all intermediate states cached in a dictionary,
    extract a certain state based on `state_name` and append it to a list
    """
    extracted_states = []
    for key, value in state_dict.items():
        if state_name in value:
            extracted_states.append(value[state_name])
    return extracted_states
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/hooks.py
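`DistillHooks` relies on PyTorch forward hooks to cache intermediate outputs for the distillation losses. A minimal, self-contained sketch of that pattern on a toy model (not from the repository; the model and hook name are invented for illustration):

```python
# Illustrative sketch: a forward hook caches an intermediate module's output so a
# distillation loss can read it after the forward pass, as nn_module_hook does above.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
cached = {}

def make_hook(name):
    def hook(module, inputs, output):
        cached[name] = output          # same idea as DistillHooks.nn_module_hook
    return hook

handle = model[0].register_forward_hook(make_hook("first_linear"))
_ = model(torch.randn(4, 8))
print(cached["first_linear"].shape)    # torch.Size([4, 8])
handle.remove()                        # remove the hook once states are no longer needed
```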
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team and Huawei Noah's Ark Lab. # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import argparse import csv import logging import os import random import sys import json import math import time import numpy as np import torch from collections import namedtuple from tempfile import TemporaryDirectory from pathlib import Path from torch.utils.data import (DataLoader, RandomSampler,Dataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.nn import MSELoss sys.path.append('/workspace/bert/') from modeling import BertForPreTraining, BertModel, Project, WEIGHTS_NAME, CONFIG_NAME from schedulers import LinearWarmUpScheduler, ConstantLR from tokenization_utils import BertTokenizer from apex.optimizers import FusedAdam from hooks import * from losses import * from utils import data_utils from utils.utils import is_main_process, get_rank, get_world_size, unwrap_ddp, set_seed from itertools import chain csv.field_size_limit(sys.maxsize) import lddl.torch # This is used for running on Huawei Cloud. oncloud = True try: import moxing as mox except: oncloud = False logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, stream=sys.stdout) logger = logging.getLogger(__name__) def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--input_dir", type=str, required=True) parser.add_argument("--teacher_model", default=None, type=str, required=True) parser.add_argument("--student_model", default=None, type=str, required=True) parser.add_argument("--output_dir", default=None, type=str, required=True) parser.add_argument('--vocab_file', type=str, default=None, required=True, help="Vocabulary mapping/file BERT was pretrainined on") # Other parameters parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--reduce_memory", action="store_true", help="Store training data as on-disc memmaps to massively reduce memory usage") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--weight_decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay') parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--steps_per_epoch', type=int, default=-1, help="Number of updates steps to in one epoch.") parser.add_argument('--max_steps', type=int, default=-1, help="Number of training steps.") parser.add_argument('--amp', action='store_true', default=False, help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--continue_train', action='store_true', default=False, help='Whether to train from checkpoints') parser.add_argument('--disable_progress_bar', default=False, action='store_true', help='Disable tqdm progress bar') parser.add_argument('--max_grad_norm', type=float, default=1., help="Gradient Clipping threshold") # Additional arguments parser.add_argument('--eval_step', type=int, default=1000) # This is used for running on Huawei Cloud. 
parser.add_argument('--data_url', type=str, default="") #Distillation specific parser.add_argument('--value_state_loss', action='store_true', default=False) parser.add_argument('--hidden_state_loss', action='store_true', default=False) parser.add_argument('--use_last_layer', action='store_true', default=False) parser.add_argument('--use_kld', action='store_true', default=False) parser.add_argument('--use_cosine', action='store_true', default=False) parser.add_argument('--distill_config', default="distillation_config.json", type=str, help="path the distillation config") parser.add_argument('--num_workers', type=int, default=4, help='number of DataLoader worker processes per rank') args = parser.parse_args() logger.info('args:{}'.format(args)) if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, stream=sys.stdout) logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.amp)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) # Reference params author_gbs = 256 author_steps_per_epoch = 22872 author_epochs = 3 author_max_steps = author_steps_per_epoch * author_epochs # Compute present run params if args.max_steps == -1 or args.steps_per_epoch == -1: args.steps_per_epoch = author_steps_per_epoch * author_gbs // (args.train_batch_size * get_world_size() * args.gradient_accumulation_steps) args.max_steps = author_max_steps * author_gbs // (args.train_batch_size * get_world_size() * args.gradient_accumulation_steps) #Set seed set_seed(args.seed, n_gpu) if os.path.exists(args.output_dir) and os.listdir(args.output_dir): raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir) and is_main_process(): os.makedirs(args.output_dir) tokenizer = BertTokenizer.from_pretrained(args.teacher_model, do_lower_case=args.do_lower_case) teacher_model, teacher_config = BertModel.from_pretrained(args.teacher_model, distill_config=args.distill_config) # Required to make sure model's fwd doesn't return anything. required for DDP. 
# fwd output not being used in loss computation crashes DDP teacher_model.make_teacher() if args.continue_train: student_model, student_config = BertForPreTraining.from_pretrained(args.student_model, distill_config=args.distill_config) else: student_model, student_config = BertForPreTraining.from_scratch(args.student_model, distill_config=args.distill_config) # We need a projection layer since teacher.hidden_size != student.hidden_size use_projection = student_config.hidden_size != teacher_config.hidden_size if use_projection: project = Project(student_config, teacher_config) if args.continue_train: project_model_file = os.path.join(args.student_model, "project.bin") project_ckpt = torch.load(project_model_file, map_location="cpu") project.load_state_dict(project_ckpt) distill_config = {"nn_module_names": []} #Empty list since we don't want to use nn module hooks here distill_hooks_student, distill_hooks_teacher = DistillHooks(distill_config), DistillHooks(distill_config) student_model.register_forward_hook(distill_hooks_student.child_to_main_hook) teacher_model.register_forward_hook(distill_hooks_teacher.child_to_main_hook) ## Register hooks on nn.Modules # student_fwd_pre_hook = student_model.register_forward_pre_hook(distill_hooks_student.register_nn_module_hook) # teacher_fwd_pre_hook = teacher_model.register_forward_pre_hook(distill_hooks_teacher.register_nn_module_hook) student_model.to(device) teacher_model.to(device) if use_projection: project.to(device) if args.local_rank != -1: teacher_model = torch.nn.parallel.DistributedDataParallel( teacher_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) student_model = torch.nn.parallel.DistributedDataParallel( student_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) if use_projection: project = torch.nn.parallel.DistributedDataParallel( project, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) size = 0 for n, p in student_model.named_parameters(): logger.info('n: {}'.format(n)) logger.info('p: {}'.format(p.nelement())) size += p.nelement() logger.info('Total parameters: {}'.format(size)) # Prepare optimizer param_optimizer = list(student_model.named_parameters()) if use_projection: param_optimizer += list(project.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False) scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=args.max_steps) global_step = 0 logging.info("***** Running training *****") logging.info(" Num examples = {}".format(args.train_batch_size * args.max_steps)) logging.info(" Batch size = %d", args.train_batch_size) logging.info(" Num steps = %d", args.max_steps) # Prepare the data loader. 
if is_main_process(): tic = time.perf_counter() train_dataloader = lddl.torch.get_bert_pretrain_data_loader( args.input_dir, local_rank=args.local_rank, vocab_file=args.vocab_file, data_loader_kwargs={ 'batch_size': args.train_batch_size * n_gpu, 'num_workers': args.num_workers, 'pin_memory': True, }, base_seed=args.seed, log_dir=None if args.output_dir is None else os.path.join(args.output_dir, 'lddl_log'), log_level=logging.WARNING, start_epoch=0, ) if is_main_process(): print('get_bert_pretrain_data_loader took {} s!'.format(time.perf_counter() - tic)) train_dataloader = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader tr_loss, tr_att_loss, tr_rep_loss, tr_value_loss = 0., 0., 0., 0. nb_tr_examples, local_step = 0, 0 student_model.train() scaler = torch.cuda.amp.GradScaler() transformer_losses = TransformerLosses(student_config, teacher_config, device, args) iter_start = time.time() while global_step < args.max_steps: for batch in train_dataloader: if global_step >= args.max_steps: break #remove forward_pre_hook after one forward pass #the purpose of forward_pre_hook is to register #forward_hooks on nn_module_names provided in config # if idx == 1: # student_fwd_pre_hook.remove() # teacher_fwd_pre_hook.remove() # # return # Initialize loss metrics if global_step % args.steps_per_epoch == 0: tr_loss, tr_att_loss, tr_rep_loss, tr_value_loss = 0., 0., 0., 0. mean_loss, mean_att_loss, mean_rep_loss, mean_value_loss = 0., 0., 0., 0. batch = {k: v.to(device) for k, v in batch.items()} input_ids, segment_ids, input_mask, lm_label_ids, is_next = batch['input_ids'], batch['token_type_ids'], batch['attention_mask'], batch['labels'], batch['next_sentence_labels'] att_loss = 0. rep_loss = 0. value_loss = 0. 
with torch.cuda.amp.autocast(enabled=args.amp): student_model(input_ids, segment_ids, input_mask, None) # Gather student states extracted by hooks temp_model = unwrap_ddp(student_model) student_atts = flatten_states(temp_model.distill_states_dict, "attention_scores") student_reps = flatten_states(temp_model.distill_states_dict, "hidden_states") student_values = flatten_states(temp_model.distill_states_dict, "value_states") student_embeddings = flatten_states(temp_model.distill_states_dict, "embedding_states") bsz, attn_heads, seq_len, _ = student_atts[0].shape #No gradient for teacher training with torch.no_grad(): teacher_model(input_ids, segment_ids, input_mask) # Gather teacher states extracted by hooks temp_model = unwrap_ddp(teacher_model) teacher_atts = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "attention_scores")] teacher_reps = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "hidden_states")] teacher_values = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "value_states")] teacher_embeddings = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "embedding_states")] teacher_layer_num = len(teacher_atts) student_layer_num = len(student_atts) #MiniLM if student_config.distillation_config["student_teacher_layer_mapping"] == "last_layer": if student_config.distillation_config["use_attention_scores"]: student_atts = [student_atts[-1]] new_teacher_atts = [teacher_atts[-1]] if student_config.distillation_config["use_value_states"]: student_values = [student_values[-1]] new_teacher_values = [teacher_values[-1]] if student_config.distillation_config["use_hidden_states"]: new_teacher_reps = [teacher_reps[-1]] new_student_reps = [student_reps[-1]] else: assert teacher_layer_num % student_layer_num == 0 layers_per_block = int(teacher_layer_num / student_layer_num) if student_config.distillation_config["use_attention_scores"]: new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] if student_config.distillation_config["use_value_states"]: new_teacher_values = [teacher_values[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] if student_config.distillation_config["use_hidden_states"]: new_teacher_reps = [teacher_reps[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] new_student_reps = student_reps if student_config.distillation_config["use_attention_scores"]: att_loss = transformer_losses.compute_loss(student_atts, new_teacher_atts, loss_name="attention_loss") if student_config.distillation_config["use_hidden_states"]: if use_projection: rep_loss = transformer_losses.compute_loss(project(new_student_reps), new_teacher_reps, loss_name="hidden_state_loss") else: rep_loss = transformer_losses.compute_loss(new_student_reps, new_teacher_reps, loss_name="hidden_state_loss") if student_config.distillation_config["use_embedding_states"]: if use_projection: rep_loss += transformer_losses.compute_loss(project(student_embeddings), teacher_embeddings, loss_name="embedding_state_loss") else: rep_loss += transformer_losses.compute_loss(student_embeddings, teacher_embeddings, loss_name="embedding_state_loss") if student_config.distillation_config["use_value_states"]: value_loss = transformer_losses.compute_loss(student_values, new_teacher_values, loss_name="value_state_loss") loss = att_loss + rep_loss + value_loss if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps tr_att_loss += 
att_loss.item() / args.gradient_accumulation_steps if student_config.distillation_config["use_hidden_states"]: tr_rep_loss += rep_loss.item() / args.gradient_accumulation_steps if student_config.distillation_config["use_value_states"]: tr_value_loss += value_loss.item() / args.gradient_accumulation_steps if args.amp: scaler.scale(loss).backward() scaler.unscale_(optimizer) else: loss.backward() if use_projection: torch.nn.utils.clip_grad_norm_(chain(student_model.parameters(), project.parameters()), args.max_grad_norm, error_if_nonfinite=False) else: torch.nn.utils.clip_grad_norm_(student_model.parameters(), args.max_grad_norm, error_if_nonfinite=False) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) local_step += 1 if local_step % args.gradient_accumulation_steps == 0: scheduler.step() if args.amp: scaler.step(optimizer) scaler.update() else: optimizer.step() optimizer.zero_grad() global_step = optimizer.param_groups[0]["step"] if "step" in optimizer.param_groups[0] else 0 if (global_step % args.steps_per_epoch) > 0: mean_loss = tr_loss / (global_step % args.steps_per_epoch) mean_att_loss = tr_att_loss / (global_step % args.steps_per_epoch) mean_rep_loss = tr_rep_loss / (global_step % args.steps_per_epoch) value_loss = tr_value_loss / (global_step % args.steps_per_epoch) if (global_step + 1) % args.eval_step == 0 and is_main_process(): result = {} result['global_step'] = global_step result['lr'] = optimizer.param_groups[0]["lr"] result['loss'] = mean_loss result['att_loss'] = mean_att_loss result['rep_loss'] = mean_rep_loss result['value_loss'] = value_loss result['perf'] = (global_step + 1) * get_world_size() * args.train_batch_size * args.gradient_accumulation_steps / (time.time() - iter_start) output_eval_file = os.path.join(args.output_dir, "log.txt") if is_main_process(): with open(output_eval_file, "a") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) # Save a trained model model_name = "{}".format(WEIGHTS_NAME) logging.info("** ** * Saving fine-tuned model ** ** * ") # Only save the model it-self model_to_save = student_model.module if hasattr(student_model, 'module') else student_model if use_projection: project_to_save = project.module if hasattr(project, 'module') else project output_model_file = os.path.join(args.output_dir, model_name) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) output_project_file = os.path.join(args.output_dir, "project.bin") torch.save(model_to_save.state_dict(), output_model_file) if use_projection: torch.save(project_to_save.state_dict(), output_project_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) if oncloud: logging.info(mox.file.list_directory(args.output_dir, recursive=True)) logging.info(mox.file.list_directory('.', recursive=True)) mox.file.copy_parallel(args.output_dir, args.data_url) mox.file.copy_parallel('.', args.data_url) model_name = "{}".format(WEIGHTS_NAME) logging.info("** ** * Saving fine-tuned model ** ** * ") model_to_save = student_model.module if hasattr(student_model, 'module') else student_model if use_projection: project_to_save = project.module if hasattr(project, 'module') else project output_project_file = os.path.join(args.output_dir, "project.bin") if is_main_process(): torch.save(project_to_save.state_dict(), output_project_file) output_model_file = os.path.join(args.output_dir, model_name) 
output_config_file = os.path.join(args.output_dir, CONFIG_NAME) if is_main_process(): torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) if oncloud: logging.info(mox.file.list_directory(args.output_dir, recursive=True)) logging.info(mox.file.list_directory('.', recursive=True)) mox.file.copy_parallel(args.output_dir, args.data_url) mox.file.copy_parallel('.', args.data_url) if __name__ == "__main__": start_time = time.time() main() print("Total time taken:", time.time() - start_time)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/general_distill.py
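`general_distill.py` above matches student layers to teacher layers in one of two ways: a "last_layer" (MiniLM-style) mapping that compares only the final layers, or a uniform mapping where each student layer is paired with the last teacher layer of its block. A small sketch of that index selection (not from the repository; the layer counts are illustration values):

```python
# Illustrative sketch: which teacher layers are selected for each mapping mode,
# assuming the teacher depth is a multiple of the student depth.
teacher_layer_num, student_layer_num = 12, 4

def mapped_teacher_layers(mapping):
    if mapping == "last_layer":
        return [teacher_layer_num - 1]
    assert teacher_layer_num % student_layer_num == 0
    layers_per_block = teacher_layer_num // student_layer_num
    return [i * layers_per_block + layers_per_block - 1 for i in range(student_layer_num)]

print(mapped_teacher_layers("last_layer"))   # [11]
print(mapped_teacher_layers("uniform"))      # [2, 5, 8, 11]
```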
# coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch.nn import MSELoss, KLDivLoss, CosineEmbeddingLoss import math class TransformerLosses(): """Implements transformer specific loss functions for Knowledge Distillation. """ def __init__(self, student_config, teacher_config, device, args): self.mse_loss = MSELoss() self.kl_loss = KLDivLoss(reduction='batchmean') self.cosine_loss = CosineEmbeddingLoss() self.distill_config = student_config.distillation_config self.device = device self.student_config = student_config self.teacher_config = teacher_config self.batch_size = args.train_batch_size def compute_loss_(self, pred, target, loss_name): if self.distill_config[loss_name] == "mse": return self.mse_loss(pred, target) elif self.distill_config[loss_name] == "kld": seq_length = pred.size(0) if loss_name == "value_state_loss" else pred.size(-1) if loss_name == "value_state_loss": dk_student = pred.shape[-1] // self.student_config.num_attention_heads dk_teacher = target.shape[-1] // self.teacher_config.num_attention_heads # Old: (bsz, seq, heads * dk) => (bsz, heads, seq, dk) # New: (seq, bsz, heads * dk) => (bsz * heads, seq, dk) student_values = pred.view(seq_length, self.batch_size * self.student_config.num_attention_heads, dk_student) student_values = student_values.transpose(0, 1) teacher_values = target.view(seq_length, self.batch_size * self.teacher_config.num_attention_heads, dk_teacher) teacher_values = teacher_values.transpose(0, 1) # (..., seq, dk) x (..., dk, seq) pred = torch.bmm(student_values, student_values.transpose(1, 2)) / math.sqrt(dk_student) target = torch.bmm(teacher_values, teacher_values.transpose(1, 2)) / math.sqrt(dk_teacher) pred = pred.view(self.batch_size, self.student_config.num_attention_heads, seq_length, seq_length) target = target.view(self.batch_size, self.teacher_config.num_attention_heads, seq_length, seq_length) return self.kl_loss(torch.nn.LogSoftmax(dim=-1)(pred), torch.nn.Softmax(dim=-1)(target)) / ( self.student_config.num_attention_heads * seq_length) elif self.distill_config[loss_name] == "cosine": # seq_length = pred.size(0) # return self.cosine_loss(pred.transpose(0, 2).reshape(-1, seq_length), # target.transpose(0, 2).reshape(-1, seq_length), # torch.tensor([1]).to(self.device)) return self.cosine_loss(pred.view(-1, self.teacher_config.hidden_size), target.view(-1, self.teacher_config.hidden_size), torch.tensor([1]).to(self.device)) else: error_string = "'attention_loss':{} not defined. Choose among 'mse', 'cosine' or 'kld'".format( self.distill_config["attention_loss"]) raise ValueError(error_string) def compute_loss(self, pred, target, loss_name): loss = 0. 
for student, teacher in zip(pred, target): if loss_name == "attention_loss": student = torch.where(student <= -1e2, torch.zeros_like(student).to(self.device), student) teacher = torch.where(teacher <= -1e2, torch.zeros_like(teacher).to(self.device), teacher) tmp_loss = self.compute_loss_(student, teacher, loss_name) loss += tmp_loss return loss
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/losses.py
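For the `"kld"` value-state loss, `compute_loss_` above reshapes value states per attention head, builds a seq-by-seq value relation matrix (scaled dot product of the values with themselves), and compares student and teacher relations with KL divergence. A self-contained sketch of that computation on random tensors (not from the repository; shapes and the normalization constant are simplified illustration choices):

```python
# Illustrative sketch: MiniLM-style value-relation KL loss on value states of
# shape (seq, bsz, heads * dk), mirroring the "value_state_loss" == "kld" branch.
import math
import torch
from torch.nn import KLDivLoss

seq, bsz, heads, dk = 6, 2, 4, 8
student_values = torch.randn(seq, bsz, heads * dk)
teacher_values = torch.randn(seq, bsz, heads * dk)

def value_relation(values):
    v = values.view(seq, bsz * heads, dk).transpose(0, 1)     # (bsz*heads, seq, dk)
    return torch.bmm(v, v.transpose(1, 2)) / math.sqrt(dk)    # (bsz*heads, seq, seq)

kl = KLDivLoss(reduction='batchmean')
loss = kl(torch.log_softmax(value_relation(student_values), dim=-1),
          torch.softmax(value_relation(teacher_values), dim=-1)) / (heads * seq)
print(loss.item())
```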
# coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import sys import os import unicodedata import re import logging import csv import argparse import copy import json import time import torch import numpy as np sys.path.append('/workspace/bert/') from tokenization import BertTokenizer from modeling import BertForMaskedLM from utils.utils import set_seed logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") StopWordsList = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't", "'s", "'re"] def strip_accents(text): """ Strip accents from input String. :param text: The input string. :type text: String. :returns: The processed String. :rtype: String. 
""" try: text = unicode(text, 'utf-8') except (TypeError, NameError): # unicode is a default on python 3 pass text = unicodedata.normalize('NFD', text) text = text.encode('ascii', 'ignore') text = text.decode("utf-8") return str(text) # valid string only includes al def _is_valid(string): return True if not re.search('[^a-z]', string) else False def _read_tsv(input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines def prepare_embedding_retrieval(glove_file, vocab_size=100000): cnt = 0 words = [] embeddings = {} # only read first 100,000 words for fast retrieval with open(glove_file, 'r', encoding='utf-8') as fin: for line in fin: items = line.strip().split() words.append(items[0]) embeddings[items[0]] = [float(x) for x in items[1:]] cnt += 1 if cnt == vocab_size: break vocab = {w: idx for idx, w in enumerate(words)} ids_to_tokens = {idx: w for idx, w in enumerate(words)} vector_dim = len(embeddings[ids_to_tokens[0]]) emb_matrix = np.zeros((vocab_size, vector_dim)) for word, v in embeddings.items(): if word == '<unk>': continue emb_matrix[vocab[word], :] = v # normalize each word vector d = (np.sum(emb_matrix ** 2, 1) ** 0.5) emb_norm = (emb_matrix.T / d).T return emb_norm, vocab, ids_to_tokens class DataAugmentor(object): def __init__(self, model, tokenizer, emb_norm, vocab, ids_to_tokens, M, N, p): self.model = model self.tokenizer = tokenizer self.emb_norm = emb_norm self.vocab = vocab self.ids_to_tokens = ids_to_tokens self.M = M self.N = N self.p = p def _word_distance(self, word): if word not in self.vocab.keys(): return [] word_idx = self.vocab[word] word_emb = self.emb_norm[word_idx] dist = np.dot(self.emb_norm, word_emb.T) dist[word_idx] = -np.Inf candidate_ids = np.argsort(-dist)[:self.M] return [self.ids_to_tokens[idx] for idx in candidate_ids][:self.M] def _masked_language_model(self, sent, word_pieces, mask_id, ptr): tokenized_text = self.tokenizer.tokenize(sent)[: 510] tokenized_text = ['[CLS]'] + tokenized_text tokenized_len = len(tokenized_text) tokenized_text = word_pieces + ['[SEP]'] + tokenized_text[1:] + ['[SEP]'] if len(tokenized_text) > 512: tokenized_text = tokenized_text[:512] token_ids = self.tokenizer.convert_tokens_to_ids(tokenized_text) segments_ids = [0] * (tokenized_len + 1) + [1] * (len(tokenized_text) - tokenized_len - 1) tokens_tensor = torch.tensor([token_ids]).to(device) segments_tensor = torch.tensor([segments_ids]).to(device) self.model.to(device) predictions = self.model(tokens_tensor, segments_tensor) word_candidates = torch.argsort(predictions[0, mask_id], descending=True)[:self.M].tolist() word_candidates = self.tokenizer.convert_ids_to_tokens(word_candidates) return list(filter(lambda x: x.find("##"), word_candidates)) def _word_augment(self, sentence, mask_token_idx, mask_token, ptr): word_pieces = self.tokenizer.tokenize(sentence) word_pieces = ['[CLS]'] + word_pieces[: 510] tokenized_len = len(word_pieces) token_idx = -1 for i in range(1, tokenized_len): if "##" not in word_pieces[i]: token_idx = token_idx + 1 if token_idx < mask_token_idx: word_piece_ids = [] elif token_idx == mask_token_idx: word_piece_ids = [i] else: break else: word_piece_ids.append(i) print("tobe masked", mask_token) for junk in word_piece_ids: print("masking", word_pieces[junk]) if len(word_piece_ids) == 1: 
word_pieces[word_piece_ids[0]] = '[MASK]' candidate_words = self._masked_language_model( sentence, word_pieces, word_piece_ids[0], ptr) elif len(word_piece_ids) > 1: candidate_words = self._word_distance(mask_token) else: logger.info("invalid input sentence!") return None if len(candidate_words)==0: candidate_words.append(mask_token) return candidate_words def augment(self, sent, blacklist=""): candidate_sents = [sent] all_tokens = self.tokenizer.basic_tokenizer.tokenize(sent) blacklist_tokens = [] if blacklist != "": blacklist_tokens = self.tokenizer.basic_tokenizer.tokenize(blacklist) logger.info(blacklist_tokens) candidate_words = {} ptr = 0 if len(all_tokens) > 512: print("GREATER") #400 chosen to account for additional tokens created by workpiece. Room of 512 - 400 while ptr < len(all_tokens): tokens = all_tokens[ptr: ptr+400] for (idx, word) in enumerate(tokens): temp_sent = " ".join(tokens) if _is_valid(word) and word not in StopWordsList and word not in blacklist_tokens: augment_temp = self._word_augment(temp_sent, idx, word, ptr) if augment_temp is not None: candidate_words[idx] = augment_temp ptr += 400 logger.info(candidate_words) tokens = all_tokens cnt = 0 while cnt < self.N: new_sent = list(tokens) for idx in candidate_words.keys(): candidate_word = random.choice(candidate_words[idx]) x = random.random() if x < self.p: new_sent[idx] = candidate_word if " ".join(new_sent) not in candidate_sents: candidate_sents.append(' '.join(new_sent)) cnt += 1 return candidate_sents class AugmentProcessor(object): def __init__(self, augmentor, glue_dir, task_name): self.augmentor = augmentor self.glue_dir = glue_dir self.task_name = task_name self.augment_ids = {'MRPC': [3, 4], 'MNLI': [8, 9], 'CoLA': [3], 'SST-2': [0], 'STS-B': [7, 8], 'QQP': [3, 4], 'QNLI': [1, 2], 'RTE': [1, 2], 'SQuADv1.1': ['context']} self.filter_flags = { 'MRPC': True, 'MNLI': True, 'CoLA': False, 'SST-2': True, 'STS-B': True, 'QQP': True, 'QNLI': True, 'RTE': True, } assert self.task_name in self.augment_ids def read_augment_write(self): task_dir = os.path.join(self.glue_dir, self.task_name) if "SQuADv2.0" in self.task_name: raise ValueError("Data augmentation not implemented for task: %s" % self.task_name) if "SQuAD" in self.task_name: train_samples = json.load(open(os.path.join(self.glue_dir, "train-v1.1.json"), "r", encoding='utf-8')) output_filename = os.path.join(self.glue_dir, "train-v1.1_aug.json") train_samples_aug = copy.deepcopy(train_samples) else: train_samples = _read_tsv(os.path.join(task_dir, "train.tsv")) output_filename = os.path.join(task_dir, "train_aug.tsv") augment_ids_ = self.augment_ids[self.task_name] if not "SQuAD" in self.task_name: filter_flag = self.filter_flags[self.task_name] if "SQuAD" in self.task_name: for it, entry in enumerate(train_samples["data"]): entry_aug = copy.deepcopy(entry) for i, paragraph in enumerate(entry["paragraphs"]): entry_aug["paragraphs"][i] = copy.deepcopy(paragraph) #Augment contexts for each paragraph add append #to existing list of paragraph contexts print("state", it, i) if "context" in self.augment_ids[self.task_name]: all_answers = "" #Don't augment or rewrite part of context that contains the answer for qas_id, qa in enumerate(paragraph["qas"]): answer = qa["answers"][0] all_answers += " {}".format(answer["text"]) #ignore first since it is the original sample augmented_paragraph_contexts = self.augmentor.augment(paragraph["context"], all_answers)[1:] for augmented_paragraph_context in augmented_paragraph_contexts: good_context = True 
entry_aug["paragraphs"][i]["context"] = augmented_paragraph_context #fix indices of start position. for qas_id, qa in enumerate(entry_aug["paragraphs"][i]["qas"]): # Since the context gets tokenized, augmented on joined on " ", we do the same with the answer # So that answer is a substring of context answer_tokens = self.augmentor.tokenizer.basic_tokenizer.tokenize(qa["answers"][0]["text"]) entry_aug["paragraphs"][i]["qas"][qas_id]["answers"][0]["text"] = " ".join(answer_tokens) #correct answer start based on new context answer_start_index = augmented_paragraph_context.find(" ".join(answer_tokens)) if answer_start_index == -1: logger.info("Answer: \"{}\" not found in Context \"{}\"".format(" ".join(answer_tokens), augmented_paragraph_context)) good_context = False break else: entry_aug["paragraphs"][i]["qas"][qas_id]["answers"][0]["answer_start"] = answer_start_index if good_context: train_samples_aug["data"][it]["paragraphs"].append(copy.deepcopy(entry_aug["paragraphs"][i])) #Copy to ensure we are modifying original context entry_aug["paragraphs"][i] = copy.deepcopy(paragraph) #Augment questions for each context and append #to existing list of qas #Currently augments questions on un-augmented context #Should it augment questions on augmented context as well? if "question" in self.augment_ids[self.task_name]: for qas_id, qa in enumerate(paragraph["qas"]): #ignore first since it is the original sample augmented_question_texts = self.augmentor.augment(qa["question"])[1:] for augmented_question_text in augmented_question_texts: entry_aug["paragraphs"][i]["qas"][qas_id]["question"] = augmented_question_text train_samples_aug["data"][it]["paragraphs"][i]["qas"].append(copy.deepcopy(entry_aug["paragraphs"][i]["qas"][qas_id])) answer = qa["answers"][0] orig_answer_text = answer["text"] logger.info("Having been processing {} paragraphs".format(str(i+1))) logger.info("Having been processing {} documents".format(str(it+1))) with open(output_filename, 'w', encoding="utf-8") as f: json.dump(train_samples_aug, f) else: with open(output_filename, 'w', newline='', encoding="utf-8") as f: writer = csv.writer(f, delimiter="\t") for (i, line) in enumerate(train_samples): if i == 0 and filter_flag: writer.writerow(line) continue for augment_id in augment_ids_: sent = line[augment_id] augmented_sents = self.augmentor.augment(sent) for augment_sent in augmented_sents: line[augment_id] = augment_sent writer.writerow(line) if (i+1) % 1000 == 0: logger.info("Having been processing {} examples".format(str(i+1))) def main(): parser = argparse.ArgumentParser() parser.add_argument("--pretrained_bert_model", default=None, type=str, required=True, help="Downloaded pretrained model (bert-base-uncased) is under this folder") parser.add_argument("--glove_embs", default=None, type=str, required=True, help="Glove word embeddings file") parser.add_argument("--glue_dir", default=None, type=str, required=True, help="GLUE data dir") parser.add_argument("--task_name", default=None, type=str, required=True, help="Task(eg. 
CoLA, SST-2) that we want to do data augmentation for its train set") parser.add_argument("--N", default=30, type=int, help="How many times is the corpus expanded?") parser.add_argument("--M", default=15, type=int, help="Choose from M most-likely words in the corresponding position") parser.add_argument("--p", default=0.4, type=float, help="Threshold probability p to replace current word") parser.add_argument('--seed', default=42, type=int, help="random seed for initialization") args = parser.parse_args() # logger.info(args) # Set Seed n_gpu = torch.cuda.device_count() set_seed(args.seed, n_gpu) default_params = { "CoLA": {"N": 30}, "MNLI": {"N": 10}, "MRPC": {"N": 30}, "SST-2": {"N": 20}, "STS-b": {"N": 30}, "QQP": {"N": 10}, "QNLI": {"N": 20}, "RTE": {"N": 30}, "SQuADv1.1": {"N": 15}, } if args.task_name in default_params: args.N = default_params[args.task_name]["N"] # Prepare data augmentor tokenizer = BertTokenizer.from_pretrained(args.pretrained_bert_model) model, config = BertForMaskedLM.from_pretrained(args.pretrained_bert_model) model.eval() emb_norm, vocab, ids_to_tokens = prepare_embedding_retrieval(args.glove_embs) data_augmentor = DataAugmentor(model, tokenizer, emb_norm, vocab, ids_to_tokens, args.M, args.N, args.p) # Do data augmentation processor = AugmentProcessor(data_augmentor, args.glue_dir, args.task_name) processor.read_augment_write() if __name__ == "__main__": start = time.time() main() print("Total time taken {}".format(time.time() - start))
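# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original script); the paths below
# are placeholders and only flags declared in main() above are used.
#
#   python data_augmentation.py \
#       --pretrained_bert_model /workspace/checkpoints/bert-base-uncased \
#       --glove_embs /workspace/data/glove/glove.6B.300d.txt \
#       --glue_dir /workspace/data/glue \
#       --task_name SST-2
#
# For tasks listed in default_params (e.g. SST-2), --N is overridden by the
# per-task value; --M (15) and --p (0.4) keep their defaults unless set. The
# augmented set is written next to the original as train_aug.tsv (or
# train-v1.1_aug.json for SQuADv1.1).
# ---------------------------------------------------------------------------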
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/data_augmentation.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team and Huawei Noah's Ark Lab. # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT Distillation finetuning runner.""" from __future__ import absolute_import, division, print_function import argparse import csv import logging import os import random import sys import time, datetime import math import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm, trange from torch.nn import CrossEntropyLoss, MSELoss from scipy.stats import pearsonr, spearmanr from sklearn.metrics import matthews_corrcoef, f1_score sys.path.append('/workspace/bert/') from modeling import BertForSequenceClassification, BertForQuestionAnswering, Project, WEIGHTS_NAME, CONFIG_NAME from schedulers import LinearWarmUpScheduler, ConstantLR from tokenization_utils import BertTokenizer from apex.optimizers import FusedAdam from hooks import * from losses import * from utils.utils import is_main_process, get_rank, get_world_size, unwrap_ddp, set_seed from utils.squad.squad_metrics import compute_predictions, squad_evaluate import utils.squad.squad_metrics as squad_metrics from utils.squad.squad_utils import squad_convert_examples_to_features, SquadResult, RawResult, SquadV1Processor, SquadV2Processor, load_and_cache_examples from itertools import chain csv.field_size_limit(sys.maxsize) log_format = '%(asctime)s %(message)s' logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') fh = logging.FileHandler('debug_layer_loss.log') fh.setFormatter(logging.Formatter(log_format)) logging.getLogger().addHandler(fh) logger = logging.getLogger() oncloud = True try: import moxing as mox except: oncloud = False class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.seq_length = seq_length self.label_id = label_id class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[8] text_b = line[9] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliMismatchedProcessor(MnliProcessor): """Processor for the MultiNLI Mismatched data set (GLUE version).""" def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched") class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base 
class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line[3] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class Sst2Processor(DataProcessor): """Processor for the SST-2 data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" print("Using train data") return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): print("Using Aug Data") return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class StsbProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return [None] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[7] text_b = line[8] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QqpProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) try: text_a = line[3] text_b = line[4] label = line[5] except IndexError: continue examples.append( 
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RteProcessor(DataProcessor): """Processor for the RTE data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WnliProcessor(DataProcessor): """Processor for the WNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if 
tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) seq_length = len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 1: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: {}".format(example.label)) logger.info("label_id: {}".format(label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, seq_length=seq_length)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def simple_accuracy(preds, labels): return (preds == labels).mean() def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, } def pearson_and_spearman(preds, labels): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def compute_metrics(task_name, preds, labels): assert len(preds) == len(labels) if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def get_tensor_data(output_mode, features): if output_mode == "classification": all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) elif output_mode == "regression": all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float) all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_seq_lengths) return tensor_data, all_label_ids def 
result_to_file(result, file_name, step, train_summary_writer): with open(file_name, "a") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" {} = {}".format(key, str(result[key]))) writer.write("%s = %s\n" % (key, str(result[key]))) train_summary_writer.add_scalar(key, result[key], step) def do_eval(model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, amp): eval_loss = 0 nb_eval_steps = 0 preds = [] for batch_ in tqdm(eval_dataloader, desc="Evaluating"): batch_ = tuple(t.to(device) for t in batch_) with torch.no_grad(): input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch_ with torch.cuda.amp.autocast(enabled=amp): logits = model(input_ids, segment_ids, input_mask) # create eval loss and other metric required by the task if output_mode == "classification": loss_fct = CrossEntropyLoss() tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1)) elif output_mode == "regression": loss_fct = MSELoss() tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1)) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if len(preds) == 0: preds.append(logits.detach().cpu().numpy()) else: preds[0] = np.append( preds[0], logits.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = preds[0] if output_mode == "classification": preds = np.argmax(preds, axis=1) elif output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(task_name, preds, eval_labels.numpy()) result['eval_loss'] = eval_loss return result def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--teacher_model", default=None, type=str, help="The teacher model dir.") parser.add_argument("--student_model", default=None, type=str, required=True, help="The student model dir.") parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument('--amp', action='store_true', default=False, help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--weight_decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay') parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="Total number of training steps to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--max_grad_norm', type=float, default=1., help="Gradient Clipping threshold") # added arguments parser.add_argument('--aug_train', action='store_true') parser.add_argument('--value_state_loss', action='store_true', default=False) parser.add_argument('--hidden_state_loss', action='store_true', default=False) parser.add_argument('--use_last_layer', action='store_true', default=False) parser.add_argument('--use_kld', action='store_true', default=False) parser.add_argument('--use_cosine', action='store_true', default=False) parser.add_argument('--average_loss', action='store_true', default=False) parser.add_argument('--eval_step', type=int, default=50) parser.add_argument('--pred_distill', action='store_true') parser.add_argument('--data_url', type=str, default="") parser.add_argument('--temperature', type=float, default=1.) #SQUAD args parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.", ) parser.add_argument( "--verbose_logging", action="store_true", help="If true, all of the warnings related to data processing will be printed. 
" "A number of warnings are expected for a normal SQuAD evaluation.", ) parser.add_argument( "--lang_id", default=0, type=int, help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)", ) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--distill_config', default="distillation_config.json", type=str, help="path the distillation config") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir): raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir) and is_main_process(): os.makedirs(args.output_dir, exist_ok=True) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.device = device logger.info('device: {}'.format(device)) logger.info('The args: {}'.format(args)) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mnli-mm": MnliMismatchedProcessor, "mrpc": MrpcProcessor, "sst-2": Sst2Processor, "sts-b": StsbProcessor, "qqp": QqpProcessor, "qnli": QnliProcessor, "rte": RteProcessor, "wnli": WnliProcessor, "squadv1.1": SquadV1Processor, } output_modes = { "cola": "classification", "mnli": "classification", "mrpc": "classification", "sst-2": "classification", "sts-b": "regression", "qqp": "classification", "qnli": "classification", "rte": "classification", "wnli": "classification", "squadv1.1": "classification", } # intermediate distillation default parameters default_params = { "cola": {"num_train_epochs": 50, "max_seq_length": 64}, "mnli": {"num_train_epochs": 5, "max_seq_length": 128}, "mrpc": {"num_train_epochs": 20, "max_seq_length": 128}, "sst-2": {"num_train_epochs": 10, "max_seq_length": 64}, "sts-b": {"num_train_epochs": 20, "max_seq_length": 128}, "qqp": {"num_train_epochs": 5, "max_seq_length": 128}, "qnli": {"num_train_epochs": 10, "max_seq_length": 128}, "rte": {"num_train_epochs": 20, "max_seq_length": 128}, "squadv1.1": {"num_train_epochs": 6, "max_seq_length": 384}, } acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte", "squadv1.1"] corr_tasks = ["sts-b"] mcc_tasks = ["cola"] # Prepare devices #device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger.info("device: {} n_gpu: {}".format(device, n_gpu)) set_seed(args.seed, n_gpu) # Prepare task settings # Set up tensorboard current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") train_log_dir = os.path.join(args.output_dir, current_time, 'train_' + str(get_rank()) + '_of_' + str(get_world_size())) train_summary_writer = SummaryWriter(train_log_dir) task_name = args.task_name.lower() if task_name in default_params: args.max_seq_len = default_params[task_name]["max_seq_length"] if task_name not in processors: raise ValueError("Task not found: %s" % task_name) processor = processors[task_name]() output_mode = output_modes[task_name] label_list = processor.get_labels() if task_name != "squadv1.1" else [] 
num_labels = len(label_list) tokenizer = BertTokenizer.from_pretrained(args.student_model, do_lower_case=args.do_lower_case) if not args.do_eval: if task_name == "squadv1.1": train_data = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) else: if not args.aug_train: train_examples = processor.get_train_examples(args.data_dir) else: train_examples = processor.get_aug_examples(args.data_dir) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) if task_name != "squadv1.1": num_train_optimization_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, output_mode) train_data, _ = get_tensor_data(output_mode, train_features) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=4, pin_memory=True) if args.max_steps > 0: num_train_optimization_steps = args.max_steps else: num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs if "SQuAD" not in args.task_name: eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, output_mode) eval_data, eval_labels = get_tensor_data(output_mode, eval_features) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4) if not args.do_eval: if "SQuAD" not in args.task_name: teacher_model, teacher_config = BertForSequenceClassification.from_pretrained(args.teacher_model, distill_config=args.distill_config, num_labels=num_labels, pooler=True) else: teacher_model, teacher_config = BertForQuestionAnswering.from_pretrained(args.teacher_model, distill_config=args.distill_config, pooler=False) if "SQuAD" not in args.task_name: student_model, student_config = BertForSequenceClassification.from_pretrained(args.student_model, distill_config=args.distill_config, num_labels=num_labels, pooler=True) else: student_model, student_config = BertForQuestionAnswering.from_pretrained(args.student_model, distill_config=args.distill_config, pooler=False) # We need a projection layer since teacher.hidden_size != student.hidden_size if not args.do_eval: use_projection = student_config.hidden_size != teacher_config.hidden_size if student_config.distillation_config["use_hidden_states"] and use_projection: project = Project(student_config, teacher_config) project_model_file = os.path.join(args.student_model, "project.bin") project_ckpt = torch.load(project_model_file, map_location="cpu") project.load_state_dict(project_ckpt) else: use_projection = False distill_config = {"nn_module_names": []} #Empty list since we don't want to use nn module hooks here distill_hooks_student, distill_hooks_teacher = DistillHooks(distill_config), DistillHooks(distill_config) student_model.register_forward_hook(distill_hooks_student.child_to_main_hook) if not args.do_eval: teacher_model.register_forward_hook(distill_hooks_teacher.child_to_main_hook) if not args.do_eval: teacher_model.to(device) student_model.to(device) if 
student_config.distillation_config["use_hidden_states"] and use_projection: project.to(device) if args.local_rank != -1: if not args.do_eval: teacher_model = torch.nn.parallel.DistributedDataParallel( teacher_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) student_model = torch.nn.parallel.DistributedDataParallel( student_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) if student_config.distillation_config["use_hidden_states"] and use_projection: project = torch.nn.parallel.DistributedDataParallel( project, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) if args.do_eval: global_step = 0 num_examples = 0 eval_start = time.time() logger.info("***** Running evaluation *****") student_model.eval() if "SQuAD" not in args.task_name: logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) result = do_eval(student_model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, args.amp) num_examples = len(eval_examples) else: dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) logger.info(" Num examples = %d", len(features)) logger.info(" Batch size = %d", args.eval_batch_size) result = squad_metrics.evaluate(args, student_model, dataset, examples, features, prefix="final")#global_step) result["global_step"] = global_step num_examples = len(features) eval_end = time.time() logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) logger.info("time for inference {} perf {}".format(eval_end - eval_start, num_examples * 100 / (eval_end - eval_start))) else: scaler = torch.cuda.amp.GradScaler() logger.info("***** Running training *****") logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) # Prepare optimizer param_optimizer = list(student_model.named_parameters()) if student_config.distillation_config["use_hidden_states"] and use_projection: param_optimizer += list(project.named_parameters()) size = 0 for n, p in student_model.named_parameters(): logger.info('n: {}'.format(n)) size += p.nelement() logger.info('Total parameters: {}'.format(size)) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False) schedule = 'warmup_linear' if not student_config.distillation_config["use_pred_states"]: scheduler = ConstantLR(optimizer) else: scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=num_train_optimization_steps) transformer_losses = TransformerLosses(student_config, teacher_config, device, args) def soft_cross_entropy(predicts, targets): student_likelihood = torch.nn.functional.log_softmax(predicts, dim=-1) targets_prob = torch.nn.functional.softmax(targets, dim=-1) return (- targets_prob * student_likelihood).mean() # Train and evaluate global_step = 0 best_dev_acc = 0.0 output_eval_file = os.path.join(args.output_dir, "eval_results.txt") iter_start = time.time() for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0. 
tr_att_loss = 0. tr_rep_loss = 0. tr_value_loss = 0. tr_cls_loss = 0. student_model.train() nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", ascii=True)): batch = tuple(t.to(device) for t in batch) if "squad" in task_name: input_ids, input_mask, segment_ids = batch[:3] label_ids = None else: input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch if input_ids.size()[0] != args.train_batch_size: continue att_loss = 0. rep_loss = 0. value_loss = 0. cls_loss = 0. with torch.cuda.amp.autocast(enabled=args.amp): student_model(input_ids, segment_ids, input_mask) # Gather student states extracted by hooks temp_model = unwrap_ddp(student_model) student_atts = flatten_states(temp_model.distill_states_dict, "attention_scores") student_reps = flatten_states(temp_model.distill_states_dict, "hidden_states") student_values = flatten_states(temp_model.distill_states_dict, "value_states") student_embeddings = flatten_states(temp_model.distill_states_dict, "embedding_states") student_logits = flatten_states(temp_model.distill_states_dict, "pred_states") if student_config.distillation_config["use_attention_scores"]: bsz, attn_heads, seq_len, _ = student_atts[0].shape #No gradient for teacher training with torch.no_grad(): teacher_model(input_ids, segment_ids, input_mask) # Gather teacher states extracted by hooks temp_model = unwrap_ddp(teacher_model) teacher_atts = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "attention_scores")] teacher_reps = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "hidden_states")] teacher_values = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "value_states")] if student_config.distillation_config["use_pred_states"]: if "squad" in task_name: teacher_logits = [[i.detach() for i in flatten_states(temp_model.distill_states_dict, "pred_states")[0]]] else: teacher_logits = [flatten_states(temp_model.distill_states_dict, "pred_states")[0].detach()] teacher_embeddings = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "embedding_states")] #MiniLM if student_config.distillation_config["student_teacher_layer_mapping"] == "last_layer": if student_config.distillation_config["use_attention_scores"]: student_atts = [student_atts[-1]] new_teacher_atts = [teacher_atts[-1]] if student_config.distillation_config["use_value_states"]: student_values = [student_values[-1]] new_teacher_values = [teacher_values[-1]] if student_config.distillation_config["use_hidden_states"]: new_teacher_reps = [teacher_reps[-1]] new_student_reps = [student_reps[-1]] else: if student_config.distillation_config["use_attention_scores"]: teacher_layer_num = len(teacher_atts) student_layer_num = len(student_atts) assert teacher_layer_num % student_layer_num == 0 layers_per_block = int(teacher_layer_num / student_layer_num) new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] if student_config.distillation_config["use_value_states"]: teacher_layer_num = len(teacher_values) student_layer_num = len(student_values) assert teacher_layer_num % student_layer_num == 0 layers_per_block = int(teacher_layer_num / student_layer_num) new_teacher_values = [teacher_values[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] if student_config.distillation_config["use_hidden_states"]: teacher_layer_num = len(teacher_reps) student_layer_num = len(student_reps) assert teacher_layer_num % student_layer_num 
== 0 layers_per_block = int(teacher_layer_num / student_layer_num) new_teacher_reps = [teacher_reps[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] new_student_reps = student_reps if student_config.distillation_config["use_attention_scores"]: att_loss = transformer_losses.compute_loss(student_atts, new_teacher_atts, loss_name="attention_loss") if student_config.distillation_config["use_hidden_states"]: if student_config.distillation_config["use_hidden_states"] and use_projection: rep_loss = transformer_losses.compute_loss(project(new_student_reps), new_teacher_reps, loss_name="hidden_state_loss") else: rep_loss = transformer_losses.compute_loss(new_student_reps, new_teacher_reps, loss_name="hidden_state_loss") if student_config.distillation_config["use_embedding_states"]: if student_config.distillation_config["use_hidden_states"] and use_projection: rep_loss += transformer_losses.compute_loss(project(student_embeddings), teacher_embeddings, loss_name="embedding_state_loss") else: rep_loss += transformer_losses.compute_loss(student_embeddings, teacher_embeddings, loss_name="embedding_state_loss") if student_config.distillation_config["use_value_states"]: value_loss = transformer_losses.compute_loss(student_values, new_teacher_values, loss_name="value_state_loss") if not args.average_loss: loss = rep_loss + att_loss + value_loss else: loss = (rep_loss / len(new_student_reps)) + (att_loss / len(student_atts)) + (value_loss / len(student_values)) if student_config.distillation_config["use_attention_scores"]: tr_att_loss += att_loss.item() if student_config.distillation_config["use_hidden_states"]: tr_rep_loss += rep_loss.item() if student_config.distillation_config["use_value_states"]: tr_value_loss += value_loss.item() #pred layer specific if student_config.distillation_config["use_pred_states"]: if output_mode == "classification": if "squad" in task_name: cls_loss = 0. 
# Iterate over start and end logits for index, student_logit in enumerate(student_logits[0]): cls_loss += soft_cross_entropy(student_logit / args.temperature, teacher_logits[0][index] / args.temperature) else: cls_loss = soft_cross_entropy(student_logits[0] / args.temperature, teacher_logits[0] / args.temperature) elif output_mode == "regression": loss_mse = MSELoss() cls_loss = loss_mse(student_logits[0].view(-1), label_ids.view(-1)) loss = cls_loss tr_cls_loss += cls_loss.item() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.amp: scaler.scale(loss).backward() scaler.unscale_(optimizer) else: loss.backward() if student_config.distillation_config["use_hidden_states"] and use_projection: torch.nn.utils.clip_grad_norm_(chain(student_model.parameters(), project.parameters()), args.max_grad_norm, error_if_nonfinite=False) else: torch.nn.utils.clip_grad_norm_(student_model.parameters(), args.max_grad_norm, error_if_nonfinite=False) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: scheduler.step() if args.amp: scaler.step(optimizer) scaler.update() else: optimizer.step() optimizer.zero_grad() global_step += 1 if (global_step + 1) % args.eval_step == 0 and is_main_process(): logger.info("***** Running evaluation *****") logger.info(" Epoch = {} iter {} step".format(epoch_, global_step)) if not "squad" in task_name: logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) student_model.eval() loss = tr_loss / (step + 1) cls_loss = tr_cls_loss / (step + 1) att_loss = tr_att_loss / (step + 1) rep_loss = tr_rep_loss / (step + 1) value_loss = tr_value_loss / (step + 1) result = {} if student_config.distillation_config["use_pred_states"]: if "SQuAD" not in args.task_name: result = do_eval(student_model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, args.amp) else: dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) result = squad_metrics.evaluate(args, student_model, dataset, examples, features, prefix="final")#global_step) result['global_step'] = global_step result['lr'] = optimizer.param_groups[0]["lr"] result['cls_loss'] = cls_loss result['att_loss'] = att_loss result['rep_loss'] = rep_loss result['value_loss'] = value_loss result['loss'] = loss result['perf'] = (global_step + 1) * get_world_size() * args.train_batch_size * args.gradient_accumulation_steps / (time.time() - iter_start) if is_main_process(): result_to_file(result, output_eval_file, global_step, train_summary_writer) if not student_config.distillation_config["use_pred_states"]: save_model = True else: save_model = False if task_name in acc_tasks and result['acc'] > best_dev_acc: best_dev_acc = result['acc'] save_model = True if task_name in corr_tasks and result['corr'] > best_dev_acc: best_dev_acc = result['corr'] save_model = True if task_name in mcc_tasks and result['mcc'] > best_dev_acc: best_dev_acc = result['mcc'] save_model = True if save_model and is_main_process(): logger.info("***** Save model *****") model_to_save = student_model.module if hasattr(student_model, 'module') else student_model model_name = WEIGHTS_NAME output_model_file = os.path.join(args.output_dir, model_name) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) 
tokenizer.save_vocabulary(args.output_dir) # Test mnli-mm if student_config.distillation_config["use_pred_states"] and task_name == "mnli": task_name = "mnli-mm" processor = processors[task_name]() if not os.path.exists(args.output_dir + '-MM'): os.makedirs(args.output_dir + '-MM') eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, output_mode) eval_data, eval_labels = get_tensor_data(output_mode, eval_features) logger.info("***** Running mm evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4) result = do_eval(student_model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, args.amp) result['global_step'] = global_step tmp_output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt") result_to_file(result, tmp_output_eval_file, global_step, train_summary_writer) task_name = 'mnli' if oncloud: logging.info(mox.file.list_directory(args.output_dir, recursive=True)) logging.info(mox.file.list_directory('.', recursive=True)) mox.file.copy_parallel(args.output_dir, args.data_url) mox.file.copy_parallel('.', args.data_url) student_model.train() train_summary_writer.flush() train_summary_writer.close() if __name__ == "__main__": start_time = time.time() main() print("Total time taken:", time.time() - start_time)
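# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original runner); paths are
# placeholders and the flags shown are the ones declared in main() above.
# A typical intermediate-distillation run on an augmented GLUE task:
#
#   python task_distill.py \
#       --data_dir /workspace/data/glue/SST-2 \
#       --teacher_model /workspace/checkpoints/teacher \
#       --student_model /workspace/checkpoints/student \
#       --task_name SST-2 \
#       --output_dir /results/sst2_distill \
#       --distill_config distillation_config.json \
#       --do_lower_case --aug_train --amp
#
# Passing --do_eval instead skips training and only evaluates the student
# checkpoint given by --student_model; note that --output_dir must not already
# contain files.
# ---------------------------------------------------------------------------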
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/task_distill.py
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import copy
import torch

# If the checkpoint was saved as a dict that wraps the model weights (a
# "model" entry alongside other training state), keep only the model
# state_dict and overwrite the checkpoint file in place.
ckpt = sys.argv[1]
model = torch.load(ckpt)
if "model" in model.keys():
    model = model["model"]
torch.save(model, ckpt)
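# Illustrative usage (not part of the original utility): the script takes one
# positional argument, the checkpoint path, and rewrites that file in place,
# e.g. `python convert_ckpts.py /results/checkpoints/ckpt_8601.pt`
# (placeholder path).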
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/convert_ckpts.py
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
import h5py
import numpy as np

folder = sys.argv[1]
print(folder)
files = [os.path.join(folder, f) for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]
counts = []
for input_file in files:
    f = h5py.File(input_file, "r")
    keys = ['input_ids']
    inputs = np.asarray(f[keys[0]][:])
    print(inputs.shape)
    counts.append(inputs.shape[0])
    f.close()
print(counts)
print(sum(counts))
print(len(counts))
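# Illustrative usage (not part of the original utility): point the script at a
# directory of pretraining HDF5 shards, e.g.
#   python count_records.py /workspace/data/hdf5_shards
# It prints each shard's 'input_ids' shape, then the list of per-shard counts,
# their total, and the number of shards. The directory path is a placeholder.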
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/count_records.py
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/__init__.py
# coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six import glob import h5py import torch import random import collections import numpy as np from concurrent.futures import ProcessPoolExecutor import utils from torch.nn import functional as F from torch.utils.data import DataLoader, Sampler, RandomSampler, SequentialSampler, Dataset # model inputs - it's a bit nicer to use a namedtuple rather than keep the # features as a dict Inputs = collections.namedtuple( "Inputs", ["input_ids", "input_mask", "segment_ids", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"]) # Workaround because python functions are not picklable class WorkerInitObj(object): def __init__(self, seed): self.seed = seed def __call__(self, id): np.random.seed(seed=self.seed + id) random.seed(self.seed + id) class PretrainDataset(Dataset): def __init__(self, input_file, max_pred_length): self.input_file = input_file self.max_pred_length = max_pred_length f = h5py.File(input_file, "r") keys = ['input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions', 'masked_lm_ids', 'next_sentence_labels'] self.inputs = [np.asarray(f[key][:]) for key in keys] f.close() def __len__(self): 'Denotes the total number of samples' return len(self.inputs[0]) def __getitem__(self, index): [input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [ torch.from_numpy(input[index].astype(np.int64)) if indice < 5 else torch.from_numpy( np.asarray(input[index].astype(np.int64))) for indice, input in enumerate(self.inputs)] masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1 index = self.max_pred_length # store number of masked tokens in index padded_mask_indices = (masked_lm_positions == 0).nonzero() if len(padded_mask_indices) != 0: index = padded_mask_indices[0].item() masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index] return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels] class DatasetIterator: def __init__(self, config, batch_size, world_size=1, rank=0): self.config = config self.batch_size = batch_size self.world_size = world_size self.rank = rank self.index = 0 self.future_dataloader = None self.worker_init = WorkerInitObj(config.seed + rank) self.pool = ProcessPoolExecutor(max_workers=1) self.num_files = len(config.input_files) # Bootstrap files if few than processes if self.num_files < world_size: lcm = (len(input_files) * world_size) // math.gcd(len(input_files), world_size) factor = lcm // self.num_files temp_input_files = [] for i in range(factor): temp_input_files.extend(config.input_files) config.input_files = temp_input_files self.input_files = config.input_files[rank::world_size] random.seed(config.seed) random.shuffle(self.input_files) def __iter__(self): self.load_future() return self def __next__(self): dataloader = self.future_dataloader.result(timeout=None) self.index += 1 if self.index >= len(self.input_files): self.index = 0 
random.shuffle(self.input_files) self.load_future() return dataloader def load_future(self): self.future_dataloader = self.pool.submit( create_dataset, self.input_files[self.index], self.config.max_seq_length, self.batch_size, self.worker_init ) def load_state_dict(self, state_dict): self.index = state_dict['file_index'] def state_dict(self): return { 'file_index': self.index - 1, # We want to point to the current dataloader, not a future one } def create_dataset(input_file, max_seq_length, batch_size, worker_init, num_cpu_threads=4): print("using file", input_file) dataset = PretrainDataset( input_file=input_file, max_pred_length=max_seq_length) sampler = RandomSampler(dataset) dataloader = DataLoader( dataset, sampler=sampler, batch_size=batch_size, num_workers=num_cpu_threads, worker_init_fn=worker_init, drop_last=True, pin_memory=True) return dataloader
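# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way DatasetIterator
# might be driven from a training loop. The SimpleNamespace config and the shard
# path are hypothetical stand-ins for the real training configuration, which is
# only assumed to expose `seed`, `max_seq_length` and `input_files` as used above.
def _example_iterate_pretraining_shards():
    import glob
    import types
    config = types.SimpleNamespace(
        seed=42,
        max_seq_length=128,
        input_files=sorted(glob.glob("/workspace/data/hdf5_shards/*.hdf5")),  # placeholder path
    )
    iterator = DatasetIterator(config, batch_size=32, world_size=1, rank=0)
    # Each __next__ call yields a DataLoader over one HDF5 shard; the iterator
    # wraps around and reshuffles the shard list when it runs out of files.
    for dataloader in iterator:
        for batch in dataloader:
            # Unpacking order matches what PretrainDataset.__getitem__ returns.
            input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
            break
        break  # illustrative only: stop after a single shard
# ---------------------------------------------------------------------------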
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/data_utils.py
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.distributed as dist
import random
import numpy as np
from pathlib import Path


def unwrap_ddp(model):
    if isinstance(model, torch.nn.parallel.distributed.DistributedDataParallel):
        return model.module
    return model


def get_rank():
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank()


def get_world_size():
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()


def is_main_process():
    return get_rank() == 0


def barrier():
    if dist.is_available() and dist.is_initialized():
        dist.barrier()


def format_step(step):
    if isinstance(step, str):
        return step
    s = ""
    if len(step) > 0:
        s += "Training Epoch: {} ".format(step[0])
    if len(step) > 1:
        s += "Training Iteration: {} ".format(step[1])
    if len(step) > 2:
        s += "Validation Iteration: {} ".format(step[2])
    return s


def mkdir(path):
    Path(path).mkdir(parents=True, exist_ok=True)


def mkdir_by_main_process(path):
    if is_main_process():
        mkdir(path)
    barrier()


def set_seed(seed, n_gpu):
    random.seed(seed + get_rank())
    np.random.seed(seed + get_rank())
    torch.manual_seed(seed + get_rank())
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed + get_rank())
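# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical call order for
# these helpers at the start of a distributed run. The output path is a
# placeholder and the function is never invoked here.
def _example_distributed_setup(seed=42):
    n_gpu = torch.cuda.device_count()
    set_seed(seed, n_gpu)             # seeds python, numpy and torch, offset by rank
    if is_main_process():
        mkdir("/tmp/example_output")  # placeholder path; only rank 0 touches the filesystem
    barrier()                         # remaining ranks wait for rank 0 to finish
    # mkdir_by_main_process() wraps the is_main_process()/mkdir()/barrier() pattern above.
    return get_rank(), get_world_size()
# ---------------------------------------------------------------------------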
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/utils.py
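A short sketch of how the distributed helpers above are typically wired together at start-up; the seed, directory name, and step tuple are placeholders, and the import path assumes the module is visible as `utils`.

# Illustrative start-up sequence built from the helpers above (values are placeholders).
import torch

from utils import set_seed, get_world_size, is_main_process, mkdir_by_main_process, format_step

n_gpu = torch.cuda.device_count()
set_seed(1234, n_gpu)                # every process seeds with 1234 + its rank
mkdir_by_main_process("./results")   # rank 0 creates the directory, everyone else waits at the barrier
if is_main_process():
    print("world size:", get_world_size())
    print(format_step((0, 10)))      # -> "Training Epoch: 0 Training Iteration: 10 "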
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/squad/__init__.py
# coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0 In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted probability that a question is unanswerable. """ import collections import json import logging import math import re import string import time import tqdm import os import torch from tokenization import BasicTokenizer from torch.utils.data import DataLoader, RandomSampler, SequentialSampler logger = logging.getLogger(__name__) def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) return re.sub(regex, " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(examples, preds): """ Computes the exact and f1 scores from the examples and the model predictions """ exact_scores = {} f1_scores = {} for example in examples: qas_id = example.qas_id gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [""] if qas_id not in preds: print("Missing prediction for %s" % qas_id) continue prediction = preds[qas_id] exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers) f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not 
qid_list: total = len(exact_scores) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values()) / total), ("f1", 100.0 * sum(f1_scores.values()) / total), ("total", total), ] ) else: total = len(qid_list) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), ("total", total), ] ) def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval["%s_%s" % (prefix, k)] = new_eval[k] def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] has_ans_score, has_ans_cnt = 0, 0 for qid in qid_list: if not qid_to_has_ans[qid]: continue has_ans_cnt += 1 if qid not in scores: continue has_ans_score += scores[qid] return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh main_eval["has_ans_exact"] = has_ans_exact main_eval["has_ans_f1"] = has_ans_f1 def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for _, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] return 100.0 * best_score / len(scores), best_thresh def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0): qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples} has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer] no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer] if no_answer_probs is None: no_answer_probs = {k: 0.0 for k in preds} exact, f1 = get_raw_scores(examples, preds) exact_threshold = apply_no_ans_threshold( exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold ) f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold) evaluation = make_eval_dict(exact_threshold, 
f1_threshold) if has_answer_qids: has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids) merge_eval(evaluation, has_ans_eval, "HasAns") if no_answer_qids: no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids) merge_eval(evaluation, no_ans_eval, "NoAns") if no_answer_probs: find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer) return evaluation def compute_predictions( all_examples, all_features, all_results, args, output_prediction_file, output_nbest_file, output_null_log_odds_file, ): answers, nbest_answers = get_answers(all_examples, all_features, all_results, args) with open(output_prediction_file, "w") as writer: writer.write(json.dumps(answers, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(nbest_answers, indent=4) + "\n") # if args.version_2_with_negative: # with open(output_null_log_odds_file, "w") as writer: # writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return answers RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def get_answers(examples, features, results, args): predictions = collections.defaultdict(list) # it is possible that one example corresponds to multiple features _Prediction = collections.namedtuple('_Prediction', ['text', 'start_logit', 'end_logit']) if args.version_2_with_negative: null_vals = collections.defaultdict(lambda: (float("inf"), 0, 0)) for ex, feat, result in match_results(examples, features, results): if not args.joint_prediction: start_indices = _get_best_indices(result.start_logits, args.n_best_size) end_indices = _get_best_indices(result.end_logits, args.n_best_size) prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, feat, result, args) feature_null_score = result.start_logits[0] + result.end_logits[0] else: prelim_predictions = get_valid_prelim_predictions_joint_head(result.start_top_index, result.end_top_index, feat, result, args) # start_indices = result.start_top_index # end_indices = result.end_top_index feature_null_score = result.cls_logits prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) if args.version_2_with_negative and feature_null_score < null_vals[ex.qas_id][0]: null_vals[ex.qas_id] = (feature_null_score, result.start_logits[0], result.end_logits[0]) curr_predictions = [] seen_predictions = set() for pred in prelim_predictions: if len(curr_predictions) == args.n_best_size: break if pred.start_index > 0: final_text = get_answer_text(ex, feat, pred, args) else: final_text = '' if final_text in seen_predictions: continue seen_predictions.add(final_text) curr_predictions.append(_Prediction(final_text, pred.start_logit, pred.end_logit)) predictions[ex.qas_id] += curr_predictions # Add empty prediction if args.version_2_with_negative: for qas_id in predictions.keys(): predictions[qas_id].append(_Prediction('', null_vals[qas_id][1], null_vals[qas_id][2])) nbest_answers = collections.defaultdict(list) answers = {} for qas_id, preds in predictions.items(): seen_predictions = set() nbest = [] for pred in sorted(predictions[qas_id], key=lambda x: (x.start_logit + x.end_logit), reverse=True): if len(nbest) >= args.n_best_size: break if pred.text in seen_predictions: continue seen_predictions.add(pred.text) nbest.append(pred) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. 
if not nbest or (args.version_2_with_negative and len(nbest) == 1): nbest.append(_Prediction(text="empty", start_logit=0.0, end_logit=0.0)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry and entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_answers[qas_id].append(output) if args.version_2_with_negative: if not args.joint_prediction: score_diff = null_vals[qas_id][0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit else: score_diff = null_vals[qas_id][0] if score_diff > args.null_score_diff_threshold: answers[qas_id] = "" else: answers[qas_id] = best_non_null_entry.text else: answers[qas_id] = nbest_answers[qas_id][0]['text'] return answers, nbest_answers def get_answer_text(example, feature, pred, args): tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, args.do_lower_case, args.verbose_logging) return final_text def get_valid_prelim_predictions_joint_head(start_indices, end_indices, feature, result, args): _PrelimPrediction = collections.namedtuple( "PrelimPrediction", ["start_index", "end_index", "start_logit", "end_logit"]) prelim_predictions = [] # for start_index in start_indices: for i in range(args.beam_size): start_index = start_indices[i] for j in range(args.beam_size): # for end_index in end_indices: end_index = end_indices[i * args.beam_size + j] if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > args.max_answer_length: continue prelim_predictions.append( _PrelimPrediction( start_index=start_index, end_index=end_index, start_logit=result.start_logits[i], # start_index], end_logit=result.end_logits[i * args.beam_size + j])) # end_index])) return prelim_predictions def get_valid_prelim_predictions(start_indices, end_indices, feature, result, args): _PrelimPrediction = collections.namedtuple( "PrelimPrediction", ["start_index", "end_index", "start_logit", "end_logit"]) prelim_predictions = [] for start_index in start_indices: for end_index in end_indices: if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > args.max_answer_length: continue prelim_predictions.append( _PrelimPrediction( 
start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) return prelim_predictions def match_results(examples, features, results): unique_f_ids = set([f.unique_id for f in features]) unique_r_ids = set([r.unique_id for r in results]) matching_ids = unique_f_ids & unique_r_ids features = [f for f in features if f.unique_id in matching_ids] results = [r for r in results if r.unique_id in matching_ids] features.sort(key=lambda x: x.unique_id) results.sort(key=lambda x: x.unique_id) for f, r in zip(features, results): # original code assumes strict ordering of examples. TODO: rewrite this yield examples[f.example_index], f, r def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. 
tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indices(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indices = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indices.append(index_and_score[i][0]) return best_indices def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs def to_list(tensor): return tensor.detach().cpu().tolist() def evaluate(args, model, dataset, examples, features, prefix=""): # if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: # os.makedirs(args.output_dir) args.eval_batch_size = args.train_batch_size#args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate # if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): # model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = time.time()#timeit.default_timer() # for batch in tqdm(eval_dataloader, desc="Evaluating"): for batch in eval_dataloader: # for batch in eval_dataloader: model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], #"cls_index": batch[4], #"p_mask": batch[5], #"eval": True, } feature_indices = batch[3] with torch.cuda.amp.autocast(enabled=args.amp): outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. 
if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = RawResult(unique_id, start_logits, end_logits) all_results.append(result) eval_time = time.time() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top # end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions( examples, features, all_results, args, output_prediction_file, output_nbest_file, output_null_log_odds_file, ) # Compute the F1 and exact scores. results = squad_evaluate(examples, predictions) results["acc"] = results["f1"] return results
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/squad/squad_metrics.py
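A small worked example of the answer-scoring helpers defined above; the strings are invented and the import path assumes `squad_metrics` is on the Python path.

# Worked example for normalize_answer / compute_exact / compute_f1 (invented strings).
from squad_metrics import normalize_answer, compute_exact, compute_f1

gold = "The Eiffel Tower"

print(normalize_answer("eiffel tower."))     # "eiffel tower": lower-cased, punctuation and articles removed
print(compute_exact(gold, "eiffel tower."))  # 1, the normalized strings match exactly
print(compute_f1(gold, "the tower"))         # precision 1/1, recall 1/2 -> F1 = 2*0.5/1.5 = 0.667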
# coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import math import json import logging import os import csv import sys from functools import partial from multiprocessing import Pool, cpu_count import numpy as np from tqdm import tqdm from tokenization import BasicTokenizer, whitespace_tokenize sys.path.append('/workspace/bert/') from utils.utils import get_rank logger = logging.getLogger(__name__) #check for PyT or TF try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"): import torch _torch_available = True # pylint: disable=invalid-name logger.info("PyTorch version {} available.".format(torch.__version__)) else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False except ImportError: _torch_available = False # pylint: disable=invalid-name try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"): import tensorflow as tf assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2 _tf_available = True # pylint: disable=invalid-name logger.info("TensorFlow version {} available.".format(tf.__version__)) else: logger.info("Disabling Tensorflow because USE_TORCH is set") _tf_available = False except (ImportError, AssertionError): _tf_available = False # pylint: disable=invalid-name def is_torch_available(): return _torch_available def is_tf_available(): return _tf_available if is_torch_available(): import torch from torch.utils.data import TensorDataset if is_tf_available(): import tensorflow as tf class DataProcessor: """Base class for data converters for sequence classification data sets.""" def get_example_from_tensor_dict(self, tensor_dict): """Gets an example from a dict with tensorflow tensors Args: tensor_dict: Keys and values should match the corresponding Glue tensorflow_dataset examples. """ raise NotImplementedError() def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() def tfds_map(self, example): """Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. 
This method converts examples to the correct format.""" if len(self.get_labels()) > 1: example.label = self.get_labels()[int(example.label)] return example @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8-sig") as f: return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start : (new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def _new_check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # if len(doc_spans) == 1: # return True best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span["start"] + doc_span["length"] - 1 if position < doc_span["start"]: continue if position > end: continue num_left_context = position - doc_span["start"] num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def _is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training): features = [] if is_training and not example.is_impossible: # Get start and end position start_position = example.start_position end_position = example.end_position # If the answer cannot be found in the text, then skip this example. actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)]) cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. 
'%s'", actual_text, cleaned_answer_text) return [] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text ) spans = [] truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, max_length=max_query_length) sequence_added_tokens = ( tokenizer.max_len - tokenizer.max_len_single_sentence + 1 if "roberta" in str(type(tokenizer)) or "camembert" in str(type(tokenizer)) else tokenizer.max_len - tokenizer.max_len_single_sentence ) sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair span_doc_tokens = all_doc_tokens while len(spans) * doc_stride < len(all_doc_tokens): encoded_dict = tokenizer.encode_plus( truncated_query if tokenizer.padding_side == "right" else span_doc_tokens, span_doc_tokens if tokenizer.padding_side == "right" else truncated_query, max_length=max_seq_length, return_overflowing_tokens=True, pad_to_max_length=True, stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens, truncation_strategy="only_second" if tokenizer.padding_side == "right" else "only_first", return_token_type_ids=True, ) paragraph_len = min( len(all_doc_tokens) - len(spans) * doc_stride, max_seq_length - len(truncated_query) - sequence_pair_added_tokens, ) if tokenizer.pad_token_id in encoded_dict["input_ids"]: if tokenizer.padding_side == "right": non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)] else: last_padding_id_position = ( len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id) ) non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :] else: non_padded_ids = encoded_dict["input_ids"] tokens = tokenizer.convert_ids_to_tokens(non_padded_ids) token_to_orig_map = {} for i in range(paragraph_len): index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i] encoded_dict["paragraph_len"] = paragraph_len encoded_dict["tokens"] = tokens encoded_dict["token_to_orig_map"] = token_to_orig_map encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens encoded_dict["token_is_max_context"] = {} encoded_dict["start"] = len(spans) * doc_stride encoded_dict["length"] = paragraph_len spans.append(encoded_dict) if "overflowing_tokens" not in encoded_dict: break span_doc_tokens = encoded_dict["overflowing_tokens"] for doc_span_index in range(len(spans)): for j in range(spans[doc_span_index]["paragraph_len"]): is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j) index = ( j if tokenizer.padding_side == "left" else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j ) spans[doc_span_index]["token_is_max_context"][index] = is_max_context for span in spans: # 
Identify the position of the CLS token cls_index = span["input_ids"].index(tokenizer.cls_token_id) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # Original TF implem also keep the classification token (set to 0) p_mask = np.ones_like(span["token_type_ids"]) if tokenizer.padding_side == "right": p_mask[len(truncated_query) + sequence_added_tokens :] = 0 else: p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0 pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id) special_token_indices = np.asarray( tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True) ).nonzero() p_mask[pad_token_indices] = 1 p_mask[special_token_indices] = 1 # Set the cls index to 0: the CLS index can be used for impossible answers p_mask[cls_index] = 0 span_is_impossible = example.is_impossible start_position = 0 end_position = 0 if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = span["start"] doc_end = span["start"] + span["length"] - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = cls_index end_position = cls_index span_is_impossible = True else: if tokenizer.padding_side == "left": doc_offset = 0 else: doc_offset = len(truncated_query) + sequence_added_tokens start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset features.append( SquadFeatures( span["input_ids"], span["attention_mask"], span["token_type_ids"], cls_index, p_mask.tolist(), example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing. unique_id=0, paragraph_len=span["paragraph_len"], token_is_max_context=span["token_is_max_context"], tokens=span["tokens"], token_to_orig_map=span["token_to_orig_map"], start_position=start_position, end_position=end_position, is_impossible=span_is_impossible, qas_id=example.qas_id, ) ) return features def squad_convert_example_to_features_init(tokenizer_for_convert): global tokenizer tokenizer = tokenizer_for_convert def squad_convert_examples_to_features( examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1, tqdm_enabled=True, ): """ Converts a list of examples into a list of features that can be directly given as input to a model. It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs. Args: examples: list of :class:`~transformers.data.processors.squad.SquadExample` tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer` max_seq_length: The maximum sequence length of the inputs. doc_stride: The stride used when the context is too large and is split across several features. max_query_length: The maximum length of the query. is_training: whether to create features for model evaluation or model training. return_dataset: Default False. Either 'pt' or 'tf'. 
if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset threads: multiple processing threadsa-smi Returns: list of :class:`~transformers.data.processors.squad.SquadFeatures` Example:: processor = SquadV2Processor() examples = processor.get_dev_examples(data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, ) """ # Defining helper methods features = [] threads = min(threads, cpu_count()) with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p: annotate_ = partial( squad_convert_example_to_features, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=is_training, ) features = list( tqdm( p.imap(annotate_, examples, chunksize=32), total=len(examples), desc="convert squad examples to features", disable=not tqdm_enabled, ) ) new_features = [] unique_id = 1000000000 example_index = 0 for example_features in tqdm( features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled ): if not example_features: continue for example_feature in example_features: example_feature.example_index = example_index example_feature.unique_id = unique_id new_features.append(example_feature) unique_id += 1 example_index += 1 features = new_features del new_features if return_dataset == "pt": if not is_torch_available(): raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.") # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float) if not is_training: all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask ) else: all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) dataset = TensorDataset( all_input_ids, all_attention_masks, all_token_type_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask, all_is_impossible, ) return features, dataset elif return_dataset == "tf": if not is_tf_available(): raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.") def gen(): for i, ex in enumerate(features): yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, "feature_index": i, "qas_id": ex.qas_id, }, { "start_positions": ex.start_position, "end_positions": ex.end_position, "cls_index": ex.cls_index, "p_mask": ex.p_mask, "is_impossible": ex.is_impossible, }, ) # Why have we split the batch into a tuple? PyTorch just has a list of tensors. 
train_types = ( { "input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32, "feature_index": tf.int64, "qas_id": tf.string, }, { "start_positions": tf.int64, "end_positions": tf.int64, "cls_index": tf.int64, "p_mask": tf.int32, "is_impossible": tf.int32, }, ) train_shapes = ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "token_type_ids": tf.TensorShape([None]), "feature_index": tf.TensorShape([]), "qas_id": tf.TensorShape([]), }, { "start_positions": tf.TensorShape([]), "end_positions": tf.TensorShape([]), "cls_index": tf.TensorShape([]), "p_mask": tf.TensorShape([None]), "is_impossible": tf.TensorShape([]), }, ) return tf.data.Dataset.from_generator(gen, train_types, train_shapes) else: return features class SquadProcessor(DataProcessor): """ Processor for the SQuAD data set. Overriden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively. """ def __init__(self): self.train_file = None self.dev_file = None def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False): if not evaluate: answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8") answer_start = tensor_dict["answers"]["answer_start"][0].numpy() answers = [] else: answers = [ {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")} for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"]) ] answer = None answer_start = None return SquadExample( qas_id=tensor_dict["id"].numpy().decode("utf-8"), question_text=tensor_dict["question"].numpy().decode("utf-8"), context_text=tensor_dict["context"].numpy().decode("utf-8"), answer_text=answer, start_position_character=answer_start, title=tensor_dict["title"].numpy().decode("utf-8"), answers=answers, ) def get_examples_from_dataset(self, dataset, evaluate=False): """ Creates a list of :class:`~transformers.data.processors.squad.SquadExample` using a TFDS dataset. Args: dataset: The tfds dataset loaded from `tensorflow_datasets.load("squad")` evaluate: boolean specifying if in evaluation mode or in training mode Returns: List of SquadExample Examples:: import tensorflow_datasets as tfds dataset = tfds.load("squad") training_examples = get_examples_from_dataset(dataset, evaluate=False) evaluation_examples = get_examples_from_dataset(dataset, evaluate=True) """ if evaluate: dataset = dataset["validation"] else: dataset = dataset["train"] examples = [] for tensor_dict in tqdm(dataset): examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate)) return examples def get_train_examples(self, data_dir, filename=None): """ Returns the training examples from the data directory. Args: data_dir: Directory containing the data files used for training and evaluating. filename: None by default, specify this if the training file has a different name than the original one which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. 
""" if data_dir is None: data_dir = "" if self.train_file is None: raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") opening = self.train_file if filename is None else filename print("oprning file", opening) with open( os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8" ) as reader: input_data = json.load(reader)["data"] return self._create_examples(input_data, "train") def get_dev_examples(self, data_dir, filename=None): """ Returns the evaluation example from the data directory. Args: data_dir: Directory containing the data files used for training and evaluating. filename: None by default, specify this if the evaluation file has a different name than the original one which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. """ if data_dir is None: data_dir = "" if self.dev_file is None: raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") with open( os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8" ) as reader: input_data = json.load(reader)["data"] return self._create_examples(input_data, "dev") def _create_examples(self, input_data, set_type): is_training = set_type == "train" examples = [] for entry in tqdm(input_data): title = entry["title"] for paragraph in entry["paragraphs"]: context_text = paragraph["context"] for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position_character = None answer_text = None answers = [] if "is_impossible" in qa: is_impossible = qa["is_impossible"] else: is_impossible = False if not is_impossible: if is_training: answer = qa["answers"][0] answer_text = answer["text"] start_position_character = answer["answer_start"] else: answers = qa["answers"] example = SquadExample( qas_id=qas_id, question_text=question_text, context_text=context_text, answer_text=answer_text, start_position_character=start_position_character, title=title, is_impossible=is_impossible, answers=answers, ) examples.append(example) return examples class SquadV1Processor(SquadProcessor): def __init__(self, aug_data=False): if not aug_data: self.train_file = "train-v1.1.json" else: self.train_file = "train-v1.1_aug.json" print("Using {} for training".format(self.train_file)) self.dev_file = "dev-v1.1.json" class SquadV2Processor(SquadProcessor): def __init__(self, aug_data=False): if not aug_data: self.train_file = "train-v2.0.json" else: self.train_file = "train-v2.0_aug.json" self.dev_file = "dev-v2.0.json" class SquadExample(object): """ A single training/test example for the Squad dataset, as loaded from disk. Args: qas_id: The example's unique identifier question_text: The question string context_text: The context string answer_text: The answer string start_position_character: The character position of the start of the answer title: The title of the example answers: None by default, this is used during evaluation. Holds answers as well as their start positions. is_impossible: False by default, set to True if the example has no possible answer. 
""" def __init__( self, qas_id, question_text, context_text, answer_text, start_position_character, title, answers=[], is_impossible=False, ): self.qas_id = qas_id self.question_text = question_text self.context_text = context_text self.answer_text = answer_text self.title = title self.is_impossible = is_impossible self.answers = answers self.start_position, self.end_position = 0, 0 doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True # Split on whitespace so that different tokens may be attributed to their original position. for c in self.context_text: if _is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) self.doc_tokens = doc_tokens self.char_to_word_offset = char_to_word_offset # Start and end positions only has a value during evaluation. if start_position_character is not None and not is_impossible: self.start_position = char_to_word_offset[start_position_character] self.end_position = char_to_word_offset[ min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1) ] class SquadFeatures(object): """ Single squad example features to be fed to a model. Those features are model-specific and can be crafted from :class:`~transformers.data.processors.squad.SquadExample` using the :method:`~transformers.data.processors.squad.squad_convert_examples_to_features` method. Args: input_ids: Indices of input sequence tokens in the vocabulary. attention_mask: Mask to avoid performing attention on padding token indices. token_type_ids: Segment token indices to indicate first and second portions of the inputs. cls_index: the index of the CLS token. p_mask: Mask identifying tokens that can be answers vs. tokens that cannot. Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer example_index: the index of the example unique_id: The unique Feature identifier paragraph_len: The length of the context token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object. If a token does not have their maximum context in this feature object, it means that another feature object has more information related to that token and should be prioritized over this feature for that token. tokens: list of tokens corresponding to the input ids token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer. start_position: start of the answer token index end_position: end of the answer token index """ def __init__( self, input_ids, attention_mask, token_type_ids, cls_index, p_mask, example_index, unique_id, paragraph_len, token_is_max_context, tokens, token_to_orig_map, start_position, end_position, is_impossible, qas_id: str = None, ): self.input_ids = input_ids self.attention_mask = attention_mask self.token_type_ids = token_type_ids self.cls_index = cls_index self.p_mask = p_mask self.example_index = example_index self.unique_id = unique_id self.paragraph_len = paragraph_len self.token_is_max_context = token_is_max_context self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible self.qas_id = qas_id class SquadResult(object): """ Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset. Args: unique_id: The unique identifier corresponding to that example. 
start_logits: The logits corresponding to the start of the answer end_logits: The logits corresponding to the end of the answer """ def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None): self.start_logits = start_logits self.end_logits = end_logits self.unique_id = unique_id if start_top_index: self.start_top_index = start_top_index self.end_top_index = end_top_index self.cls_logits = cls_logits RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) # Method to load/cache and return processed squad dataset def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): #args.data_dir = args.glue_dir args.cache_dir = args.data_dir args.version_2_with_negative = False args.overwrite_cache = False#True args.max_seq_length = 384 args.doc_stride = 128 args.max_query_length = 64 args.threads = 1 args.train_file = None args.predict_file = None args.joint_prediction = False if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir or args.cache_dir or "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}".format( "2.0" if args.version_2_with_negative else "1.1", "dev" if evaluate else "train_aug" if args.aug_train else "train", str(args.max_seq_length), ), ) # Init features and dataset from cache if it exists # cached_features_file = "/squad/train-v1.1.json_bert-large-uncased_384_128_64" if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) if not output_examples: dataset = features_and_dataset["dataset"] else: features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) del features_and_dataset else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor(aug_data=args.aug_train).get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: print("process {} calling".format(get_rank())) processor = SquadV2Processor(aug_data=args.aug_train) if args.version_2_with_negative else SquadV1Processor(aug_data=args.aug_train) if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) if not output_examples: torch.save({"dataset": dataset}, cached_features_file) else: torch.save({"features": features, "dataset": dataset, 
"examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/distillation/utils/squad/squad_utils.py
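A minimal sketch of what SquadExample computes from raw text, useful for seeing how character offsets map to word positions; the question, context, and answer strings are invented, and the import path is an assumption.

# Minimal SquadExample sketch (invented strings) showing the whitespace tokenization above.
from squad_utils import SquadExample

example = SquadExample(
    qas_id="dev-0001",
    question_text="Where is the tower?",
    context_text="The tower stands in Paris.",
    answer_text="Paris",
    start_position_character=20,   # character offset of "Paris" inside the context
    title="Tower",
)

print(example.doc_tokens)       # ['The', 'tower', 'stands', 'in', 'Paris.']
print(example.start_position)   # 4, word index where the answer starts
print(example.end_position)     # 4, word index where the answer ends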
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os from pathlib import Path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1" # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file DATALOADER_FN_NAME, BaseLoader, BaseSaver, Format, load_from_file, ) from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file LOGGER = logging.getLogger("export_model") INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.PYT] OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TS_TRACE, Format.TS_SCRIPT, Format.ONNX] def _get_args(): parser = argparse.ArgumentParser( description="Script for exporting models from supported frameworks.", allow_abbrev=False ) parser.add_argument("--input-path", help="Path to input python module", required=True) parser.add_argument( "--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True ) parser.add_argument("--output-path", help="Path to output model file", required=True) parser.add_argument( "--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True ) parser.add_argument("--dataloader", help="Path to python module containing data loader") parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument( "--ignore-unknown-parameters", help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)", action="store_true", default=False, ) args, unparsed_args = parser.parse_known_args() Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser) if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value: saver_type = f"{Format.PYT.value}--{Format.ONNX.value}" else: saver_type = args.output_type Saver: BaseSaver = savers.get(saver_type) ArgParserGenerator(Saver).update_argparser(parser) if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) if args.ignore_unknown_parameters: args, unknown_args = parser.parse_known_args() LOGGER.warning(f"Got additional args {unknown_args}") else: args = parser.parse_args() return args def main(): args = _get_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info("args:") for key, value in 
vars(args).items(): LOGGER.info(f" {key} = {value}") dataloader_fn = None if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) Loader: BaseLoader = loaders.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) model = loader.load(args.input_path, dataloader_fn=dataloader_fn, output_type=args.output_type) LOGGER.info("inputs: %s", model.inputs) LOGGER.info("outputs: %s", model.outputs) if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value: saver_type = f"{Format.PYT.value}--{Format.ONNX.value}" else: saver_type = args.output_type Saver: BaseSaver = savers.get(saver_type) saver = ArgParserGenerator(Saver).from_args(args) saver.save(model, args.output_path, dataloader_fn) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/export_model.py
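export_model.py loads the module given by --dataloader and pulls out the callable named by DATALOADER_FN_NAME; its keyword arguments become extra CLI flags through ArgParserGenerator. The sketch below is a hypothetical such module: the function name, the `input__*` keys, and the (ids, inputs, labels) tuple layout are assumptions rather than the toolkit's documented contract.

# Hypothetical --dataloader module for export_model.py; all names and shapes are illustrative.
import numpy as np


def get_dataloader_fn(max_seq_length: int = 384, batch_size: int = 8):
    """Assumed to match DATALOADER_FN_NAME; its kwargs become CLI flags via ArgParserGenerator."""

    def _dataloader():
        ids = list(range(batch_size))
        inputs = {
            "input__0": np.zeros((batch_size, max_seq_length), dtype=np.int64),  # e.g. input_ids
            "input__1": np.ones((batch_size, max_seq_length), dtype=np.int64),   # e.g. segment/attention ids
        }
        labels = None  # no labels are needed just to trace/export the model
        yield ids, inputs, labels

    return _dataloader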
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import subprocess import sys from argparse import Namespace from typing import Any, Dict, List, Optional import numpy as np from run_squad import RawResult, convert_examples_to_features, get_answers, read_squad_examples from tokenization import BertTokenizer # from triton.deployment_toolkit.core import BaseMetricsCalculator class MetricsCalculator(BaseMetricsCalculator): def __init__( self, eval_script: str = "data/squad/v1.1/evaluate-v1.1.py", predict_file: str = "", output_dir: str = "./", n_best_size: int = 20, max_answer_length: int = 30, version_2_with_negative: bool = False, max_seq_length: int = 384, doc_stride: int = 128, max_query_length: int = 64, vocab_file: str = "", do_lower_case: bool = True, max_len: int = 512, ): tokenizer = BertTokenizer(vocab_file, do_lower_case=do_lower_case, max_len=max_len) # for bert large self.eval_examples = read_squad_examples( input_file=predict_file, is_training=False, version_2_with_negative=version_2_with_negative ) self.eval_features = convert_examples_to_features( examples=self.eval_examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=False, ) self.output_dir = output_dir self.eval_script = eval_script self.predict_file = predict_file args = Namespace( version_2_with_negative=version_2_with_negative, n_best_size=n_best_size, max_answer_length=max_answer_length, verbose_logging=False, do_lower_case=do_lower_case, ) self.args = args self.all_results: List[RawResult] = [] def _calc(self) -> Dict[str, float]: dataset_size = len(self.eval_features) self.all_results = self.all_results[:dataset_size] output_prediction_file = os.path.join(self.output_dir, "predictions.json") answers, _ = get_answers(self.eval_examples, self.eval_features, self.all_results, self.args) with open(output_prediction_file, "w") as f: f.write(json.dumps(answers, indent=4) + "\n") eval_out = subprocess.check_output( [sys.executable, self.eval_script, self.predict_file, output_prediction_file] ) scores = str(eval_out).strip() # exact_match = float(scores.split(":")[1].split(",")[0]) f1 = float(scores.split(":")[2].split("}")[0]) return {"f1": f1} def update( self, ids: List[Any], y_pred: Dict[str, np.ndarray], x: Optional[Dict[str, np.ndarray]], y_real: Optional[Dict[str, np.ndarray]], ): start_logits = y_pred["output__0"] end_logits = y_pred["output__1"] for unique_id, start_logit, end_logit in zip(ids, start_logits, end_logits): start_logit = start_logit.tolist() end_logit = end_logit.tolist() raw_result = RawResult(unique_id=unique_id, start_logits=start_logit, end_logits=end_logit) self.all_results.append(raw_result) @property def metrics(self) -> Dict[str, float]: return self._calc()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/metrics.py
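`MetricsCalculator` above accumulates per-example `RawResult`s in `update()` and only computes F1 (by shelling out to the SQuAD evaluate script) when `metrics` is read. A toy, self-contained sketch of the same accumulate-then-compute pattern, using a dummy accuracy metric instead of the SQuAD scorer:

```python
# Toy calculator mirroring the update()/metrics flow above; the accuracy
# metric here is illustrative, not the SQuAD evaluate script.
from typing import Dict, List

import numpy as np


class ToyMetricsCalculator:
    def __init__(self):
        self._correct: List[bool] = []

    def update(self, ids, y_pred: Dict[str, np.ndarray], x, y_real: Dict[str, np.ndarray]):
        # accumulate one batch of results; nothing is scored yet
        pred = y_pred["output__0"].argmax(axis=-1)
        real = y_real["output__0"].argmax(axis=-1)
        self._correct.extend((pred == real).tolist())

    @property
    def metrics(self) -> Dict[str, float]:
        # score everything collected so far in one pass
        return {"accuracy": float(np.mean(self._correct))}


calc = ToyMetricsCalculator()
batch = {"output__0": np.array([[0.1, 0.9], [0.8, 0.2]])}
calc.update(ids=[0, 1], y_pred=batch, x=None, y_real=batch)
print(calc.metrics)  # {'accuracy': 1.0}
```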
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Using `calculate_metrics.py` script, you can obtain model accuracy/error metrics using defined `MetricsCalculator` class. Data provided to `MetricsCalculator` are obtained from dump files stored in directory pointed by `--dump-dir` argument. Above files are prepared by `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts. Output data is stored in csv file pointed by `--csv` argument. Example call: ```shell script python ./triton/calculate_metrics.py \ --dump-dir /results/dump_triton \ --csv /results/accuracy_results.csv \ --metrics metrics.py \ --metric-class-param1 value ``` """ import argparse import csv import logging import string from pathlib import Path # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file from .deployment_toolkit.dump import JsonDumpReader LOGGER = logging.getLogger("calculate_metrics") TOTAL_COLUMN_NAME = "_total_" def main(): logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False) parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True) parser.add_argument("--csv", help="Path to csv file", required=True) parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True) args, *_ = parser.parse_known_args() MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") ArgParserGenerator(MetricsCalculator).update_argparser(parser) args = parser.parse_args() LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args) reader = JsonDumpReader(args.dump_dir) for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]): ids = list(ids["ids"]) if ids is not None else None metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true) metrics = metrics_calculator.metrics metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])] if metric_names_with_space: raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}") csv_path = Path(args.csv) csv_path.parent.mkdir(parents=True, exist_ok=True) with csv_path.open("w") as csv_file: writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys())) writer.writeheader() writer.writerow(metrics) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/calculate_metrics.py
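The script above ends by writing the metrics dict as a single-row CSV via `csv.DictWriter`. A stand-alone sketch of that final step; the values and output path are placeholders, not real results.

```python
import csv
from pathlib import Path

metrics = {"f1": 0.0, "exact_match": 0.0}        # placeholder values, not real results

csv_path = Path("/tmp/accuracy_results.csv")     # hypothetical output path
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
    writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
    writer.writeheader()                         # header row: f1,exact_match
    writer.writerow(metrics)                     # single data row with the values
```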
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model deployed on Triton, you can use `run_inference_on_triton.py` script. It sends a request with data obtained from pointed data loader and dumps received data into dump files. Those files are stored in directory pointed by `--output-dir` argument. Currently, the client communicates with the Triton server asynchronously using GRPC protocol. Example call: ```shell script python ./triton/run_inference_on_triton.py \ --server-url localhost:8001 \ --model-name ResNet50 \ --model-version 1 \ --dump-labels \ --output-dir /results/dump_triton ``` """ import argparse import functools import logging import queue import threading import time import copy import traceback from pathlib import Path from typing import Optional from tqdm import tqdm # pytype: disable=import-error try: from tritonclient import utils as client_utils # noqa: F401 from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput except ImportError: from tritongrpcclient import InferenceServerClient, InferInput, InferRequestedOutput # pytype: enable=import-error # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file from .deployment_toolkit.dump import JsonDumpWriter LOGGER = logging.getLogger("run_inference_on_triton") class SyncGRPCTritonRunner: DEFAULT_MAX_RESP_WAIT_S = 120 def __init__( self, server_url: str, model_name: str, model_version: str, *, dataloader, verbose=False, resp_wait_s: Optional[float] = None, ): self._server_url = server_url self._model_name = model_name self._model_version = model_version self._dataloader = dataloader self._verbose = verbose self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s def __iter__(self): client = InferenceServerClient(self._server_url, verbose=self._verbose) error = self._verify_triton_state(client) if error: raise RuntimeError(f"Could not communicate to Triton Server: {error}") LOGGER.debug( f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!" 
) model_config = client.get_model_config(self._model_name, self._model_version) model_metadata = client.get_model_metadata(self._model_name, self._model_version) LOGGER.info(f"Model config {model_config}") LOGGER.info(f"Model metadata {model_metadata}") inputs = {tm.name: tm for tm in model_metadata.inputs} outputs = {tm.name: tm for tm in model_metadata.outputs} output_names = list(outputs) outputs_req = [InferRequestedOutput(name) for name in outputs] for ids, x, y_real in self._dataloader: infer_inputs = [] for name in inputs: data = x[name] infer_input = InferInput(name, data.shape, inputs[name].datatype) target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype) data = data.astype(target_np_dtype) infer_input.set_data_from_numpy(data) infer_inputs.append(infer_input) results = client.infer( model_name=self._model_name, model_version=self._model_version, inputs=infer_inputs, outputs=outputs_req, client_timeout=self._response_wait_t, ) y_pred = {name: results.as_numpy(name) for name in output_names} yield ids, x, y_pred, y_real def _verify_triton_state(self, triton_client): if not triton_client.is_server_live(): return f"Triton server {self._server_url} is not live" elif not triton_client.is_server_ready(): return f"Triton server {self._server_url} is not ready" elif not triton_client.is_model_ready(self._model_name, self._model_version): return f"Model {self._model_name}:{self._model_version} is not ready" return None class AsyncGRPCTritonRunner: DEFAULT_MAX_RESP_WAIT_S = 120 DEFAULT_MAX_UNRESP_REQS = 128 DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min def __init__( self, server_url: str, model_name: str, model_version: str, *, dataloader, verbose=False, resp_wait_s: Optional[float] = None, max_unresponded_reqs: Optional[int] = None, ): self._server_url = server_url self._model_name = model_name self._model_version = model_version self._dataloader = dataloader self._verbose = verbose self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs self._results = queue.Queue() self._processed_all = False self._errors = [] self._num_waiting_for = 0 self._sync = threading.Condition() self._req_thread = threading.Thread(target=self.req_loop, daemon=True) def __iter__(self): self._req_thread.start() timeout_s = 0.050 # check flags processed_all and error flags every 50ms while True: try: ids, x, y_pred, y_real = self._results.get(timeout=timeout_s) yield ids, x, y_pred, y_real except queue.Empty: shall_stop = self._processed_all or self._errors if shall_stop: break LOGGER.debug("Waiting for request thread to stop") self._req_thread.join() if self._errors: error_msg = "\n".join(map(str, self._errors)) raise RuntimeError(error_msg) def _on_result(self, ids, x, y_real, output_names, result, error): with self._sync: request_id = str(ids[0]) NOT_MATCHING_REQUEST_ID_MSG = ( "Error during processing result - request_id doesn't match. This shouldn't have happened." 
) if error: response_id = error.get_response().id if response_id != request_id: raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG) self._errors.append(error) else: response_id = result.get_response().id if response_id != request_id: raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG) y_pred = {name: result.as_numpy(name) for name in output_names} self._results.put((ids, x, y_pred, y_real)) self._num_waiting_for -= 1 self._sync.notify_all() def req_loop(self): client = InferenceServerClient(self._server_url, verbose=self._verbose) self._errors = self._verify_triton_state(client) if self._errors: return LOGGER.debug( f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!" ) model_config = client.get_model_config(self._model_name, self._model_version) model_metadata = client.get_model_metadata(self._model_name, self._model_version) LOGGER.info(f"Model config {model_config}") LOGGER.info(f"Model metadata {model_metadata}") inputs = {tm.name: tm for tm in model_metadata.inputs} outputs = {tm.name: tm for tm in model_metadata.outputs} output_names = list(outputs) self._num_waiting_for = 0 for ids, x, y_real in self._dataloader: infer_inputs = [] for name in inputs: data = x[name] infer_input = InferInput(name, data.shape, inputs[name].datatype) target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype) data = data.astype(target_np_dtype) infer_input.set_data_from_numpy(data) infer_inputs.append(infer_input) outputs_req = [InferRequestedOutput(name) for name in outputs] with self._sync: def _check_can_send(): return self._num_waiting_for < self._max_unresp_reqs can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t) if not can_send: error_msg = f"Runner could not send new requests for {self._response_wait_t}s" self._errors.append(error_msg) self._sync.notify_all() break request_id = str(ids[0]) callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names) client.async_infer( model_name=self._model_name, model_version=self._model_version, inputs=infer_inputs, outputs=outputs_req, callback=callback, request_id=request_id, ) self._num_waiting_for += 1 self._sync.notify_all() # wait till receive all requested data with self._sync: def _all_processed(): LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs") return self._num_waiting_for == 0 self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S) if not self._processed_all: error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server" self._errors.append(error_msg) self._sync.notify_all() LOGGER.debug("Finished request thread") def _verify_triton_state(self, triton_client): errors = [] if not triton_client.is_server_live(): errors.append(f"Triton server {self._server_url} is not live") elif not triton_client.is_server_ready(): errors.append(f"Triton server {self._server_url} is not ready") elif not triton_client.is_model_ready(self._model_name, self._model_version): errors.append(f"Model {self._model_name}:{self._model_version} is not ready") return errors def _parse_args(): parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False) parser.add_argument( "--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)" ) parser.add_argument("--model-name", help="The name of the model used for inference.", required=True) parser.add_argument("--model-version", help="The version 
of the model used for inference.", required=True) parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True) parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False) parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=True) parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved") parser.add_argument( "--response-wait-time", required=False, help="Maximal time to wait for response", default=120, type=float ) parser.add_argument( "--max-unresponded-requests", required=False, help="Maximal number of unresponded requests", default=128, type=int, ) parser.add_argument( "--synchronous", help="Enable synchronous calls to Triton Server", action="store_true", default=False ) args, *_ = parser.parse_known_args() get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) args = parser.parse_args() return args def main(): args = _parse_args() log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" log_level = logging.INFO if not args.verbose else logging.DEBUG logging.basicConfig(level=log_level, format=log_format) LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) try: if args.synchronous: runner = SyncGRPCTritonRunner( args.server_url, args.model_name, args.model_version, dataloader=dataloader_fn(), verbose=False, resp_wait_s=args.response_wait_time, ) else: runner = AsyncGRPCTritonRunner( args.server_url, args.model_name, args.model_version, dataloader=dataloader_fn(), verbose=False, resp_wait_s=args.response_wait_time, max_unresponded_reqs=args.max_unresponded_requests, ) except Exception as e: message = traceback.format_exc() LOGGER.error(f"Encountered exception \n{message}") raise e with JsonDumpWriter(output_dir=args.output_dir) as writer: start = time.time() for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10): data = _verify_and_format_dump(args, ids, x, y_pred, y_real) data = copy.deepcopy(data) writer.write(**data) stop = time.time() LOGGER.info(f"\nThe inference took {stop - start:0.3f}s") def _verify_and_format_dump(args, ids, x, y_pred, y_real): data = {"outputs": y_pred, "ids": {"ids": ids}} if args.dump_inputs: data["inputs"] = x if args.dump_labels: if not y_real: raise ValueError( "Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument" ) data["labels"] = y_real return data if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/run_inference_on_triton.py
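A stripped-down synchronous version of the request loop in `SyncGRPCTritonRunner`, assuming the `tritonclient.grpc` package is installed; the model name, tensor names, and shapes below are placeholders.

```python
# Minimal synchronous Triton GRPC request, assuming the tritonclient package;
# the model name and tensor names below are placeholders.
import numpy as np
from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput

client = InferenceServerClient("localhost:8001")

batch = np.zeros((1, 384), dtype=np.int32)              # dummy token ids
infer_input = InferInput("input__0", list(batch.shape), "INT32")
infer_input.set_data_from_numpy(batch)

result = client.infer(
    model_name="bertQA",                                # placeholder model name
    inputs=[infer_input],
    outputs=[InferRequestedOutput("output__0")],
)
logits = result.as_numpy("output__0")
```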
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os from pathlib import Path from tqdm import tqdm # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0" from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file DATALOADER_FN_NAME, load_from_file, ) LOGGER = logging.getLogger("prepare_input_data") def _parse_and_validate_args(): parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False) parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True) parser.add_argument("--input-data-dir", help="Path to dir where output files will be stored", required=True) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) args, *_ = parser.parse_known_args() get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) args = parser.parse_args() return args def main(): args = _parse_and_validate_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") data = [] get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) LOGGER.info("Data loader initialized; Creating benchmark data") for _, x, _ in tqdm(dataloader_fn(), unit="batch", mininterval=10): for input__0, input__1, input__2 in zip(x["input__0"], x["input__1"], x["input__2"]): data.append( { "input__0": input__0.tolist(), "input__1": input__1.tolist(), "input__2": input__2.tolist(), } ) LOGGER.info("Dumping data") with open(Path(args.input_data_dir) / "data.json", "w") as fd: fd.write(json.dumps({"data": data})) LOGGER.info("Dumped") if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/prepare_input_data.py
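The script above serializes each sample's three input tensors into a `{"data": [...]}` JSON file that the performance scripts later pass as `--input-data`. A tiny sketch of that file layout with dummy values:

```python
import json

# Two dummy samples in the {"data": [...]} layout written above; the token id
# lists are placeholders, not real SQuAD encodings.
data = {
    "data": [
        {"input__0": [101, 2054, 102], "input__1": [0, 0, 0], "input__2": [1, 1, 1]},
        {"input__0": [101, 2129, 102], "input__1": [0, 0, 0], "input__2": [1, 1, 1]},
    ]
}

with open("/tmp/data.json", "w") as fd:          # hypothetical output location
    fd.write(json.dumps(data))
```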
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from argparse import Namespace # import torch from modeling import BertConfig, BertForQuestionAnswering def update_argparser(parser): parser.add_argument("--precision", type=str, choices=["fp16", "fp32"], default="fp32") parser.add_argument("--checkpoint", type=str, default='', help="The checkpoint of the model.") parser.add_argument("--config-file", default=None, type=str, required=True, help="The BERT model config.") parser.add_argument("--fixed-batch-dim", default=False, action="store_true") parser.add_argument("--cpu", default=False, action="store_true") def get_model_from_args(args): config = BertConfig.from_json_file(args.config_file) if config.vocab_size % 8 != 0: config.vocab_size += 8 - (config.vocab_size % 8) class BertForQuestionAnswering_int32_inputs(BertForQuestionAnswering): def forward(self, input_ids, segment_ids, attention_mask): input_ids, segment_ids, attention_mask = input_ids.long(), segment_ids.long(), attention_mask.long() return super().forward(input_ids, segment_ids, attention_mask) model = BertForQuestionAnswering_int32_inputs(config) model.enable_apex(False) if os.path.isfile(args.checkpoint): state_dict = torch.load(args.checkpoint, map_location="cpu") state_dict = state_dict["model"] if "model" in state_dict.keys() else state_dict model.load_state_dict(state_dict, strict=False) if args.precision == "fp16": model = model.half() device = "cuda:0" if not args.cpu else "cpu" model = model.to(device) model.eval() model.bermuda_batch_axis = 0 if not args.fixed_batch_dim else None return model def get_model(**model_args): """return model, ready to be traced and tensor names""" args = Namespace(**model_args) model = get_model_from_args(args) tensor_names = {"inputs": ["input__0", "input__1", "input__2"], "outputs": ["output__0", "output__1"]} return model, tensor_names
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/model.py
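One detail worth calling out in `get_model_from_args` above is the vocabulary padding: `config.vocab_size` is rounded up to the next multiple of 8, which helps Tensor Core utilization in fp16. A short worked example of that arithmetic:

```python
# Vocabulary padding to a multiple of 8, as done in get_model_from_args above.
vocab_size = 30522                      # BERT uncased vocabulary size
if vocab_size % 8 != 0:
    vocab_size += 8 - (vocab_size % 8)  # 30522 % 8 == 2, so 6 entries are added
print(vocab_size)                       # 30528, which divides evenly by 8
```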
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model on framework runtime, you can use `run_inference_on_fw.py` script. It infers data obtained from pointed data loader locally and saves received data into dump files. Those files are stored in directory pointed by `--output-dir` argument. Example call: ```shell script python ./triton/run_inference_on_fw.py \ --input-path /models/exported/model.onnx \ --input-type onnx \ --dataloader triton/dataloader.py \ --data-dir /data/imagenet \ --batch-size 32 \ --output-dir /results/dump_local \ --dump-labels ``` """ import argparse import logging import os import copy from pathlib import Path from tqdm import tqdm # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0" from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file DATALOADER_FN_NAME, BaseLoader, BaseRunner, load_from_file, ) from .deployment_toolkit.dump import JsonDumpWriter # noqa: E402 module level import not at top of file from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file LOGGER = logging.getLogger("run_inference_on_fw") def _verify_and_format_dump(args, ids, x, y_pred, y_real): data = {"outputs": y_pred, "ids": {"ids": ids}} if args.dump_inputs: data["inputs"] = x if args.dump_labels: if not y_real: raise ValueError( "Found empty label values. 
Please provide labels in dataloader_fn or do not use --dump-labels argument" ) data["labels"] = y_real return data def _parse_and_validate_args(): supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions) parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False) parser.add_argument("--input-path", help="Path to input model", required=True) parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True) parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True) parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True) parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False) parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) args, *_ = parser.parse_known_args() get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser) Runner: BaseRunner = runners.get(args.input_type) ArgParserGenerator(Runner).update_argparser(parser) args = parser.parse_args() types_requiring_io_params = [] if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outptputs]): parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters") return args def main(): args = _parse_and_validate_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") Loader: BaseLoader = loaders.get(args.input_type) Runner: BaseRunner = runners.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) runner = ArgParserGenerator(Runner).from_args(args) LOGGER.info(f"Loading {args.input_path}") model = loader.load(args.input_path) with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) LOGGER.info("Data loader initialized; Running inference") for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10): y_pred = runner_session(x) data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real) data = copy.deepcopy(data) writer.write(**data) LOGGER.info("Inference finished") if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/run_inference_on_fw.py
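Each batch dump built by `_verify_and_format_dump` above is a nested dict keyed first by group ("outputs", "ids", optionally "inputs"/"labels") and then by tensor name. A minimal sketch of that structure with dummy arrays:

```python
import numpy as np

# Dummy batch showing the nested dict handed to JsonDumpWriter.write(**data).
ids = np.array([0, 1], dtype=np.int32)
y_pred = {
    "output__0": np.zeros((2, 384), dtype=np.float32),  # start logits
    "output__1": np.zeros((2, 384), dtype=np.float32),  # end logits
}

data = {"outputs": y_pred, "ids": {"ids": ids}}
# with --dump-inputs / --dump-labels the "inputs" and "labels" groups are added too
```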
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from run_squad import convert_examples_to_features, read_squad_examples from tokenization import BertTokenizer def get_dataloader_fn( precision : str = 'fp32', batch_size: int = 8, vocab_file: str = "", do_lower_case: bool = True, predict_file: str = "", max_len: int = 512, max_seq_length: int = 384, doc_stride: int = 128, max_query_length: int = 64, version_2_with_negative: bool = False, pad_to_batch_size: bool = True, ): # Preprocess input data tokenizer = BertTokenizer(vocab_file, do_lower_case=do_lower_case, max_len=max_len) eval_examples = read_squad_examples( input_file=predict_file, is_training=False, version_2_with_negative=version_2_with_negative ) eval_features = convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=False, ) # get inputs all_unique_ids = [f.unique_id for f in eval_features] all_input_ids = [f.input_ids for f in eval_features] all_input_mask = [f.input_mask for f in eval_features] all_segment_ids = [f.segment_ids for f in eval_features] if pad_to_batch_size: # each batch should have a fixed size f = eval_features[-1] padding = batch_size - (len(all_unique_ids) % batch_size) all_unique_ids += [f.unique_id for _ in range(padding)] all_input_ids += [f.input_ids for _ in range(padding)] all_input_mask += [f.input_mask for _ in range(padding)] all_segment_ids += [f.segment_ids for _ in range(padding)] all_unique_ids = torch.tensor(all_unique_ids, dtype=torch.int32, requires_grad=False) all_input_ids = torch.tensor(all_input_ids, dtype=torch.int32, requires_grad=False) all_input_mask = torch.tensor(all_input_mask, dtype=torch.int32, requires_grad=False) all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.int32, requires_grad=False) eval_data = torch.utils.data.TensorDataset(all_unique_ids, all_input_ids, all_input_mask, all_segment_ids) eval_sampler = torch.utils.data.SequentialSampler(eval_data) eval_dataloader = torch.utils.data.DataLoader( eval_data, sampler=eval_sampler, batch_size=batch_size, shuffle=False, num_workers=0, ) dtype = { 'fp32' : np.float32, 'fp16' : np.float16 } dtype = dtype[precision] def _get_dataloader(): """return dataloader for inference""" for unique_id, input_ids, input_mask, segment_ids in eval_dataloader: unique_id = unique_id.cpu().numpy() input_ids = input_ids.cpu().numpy() input_mask = input_mask.cpu().numpy() segment_ids = segment_ids.cpu().numpy() x = {"input__0": input_ids, "input__1": segment_ids, "input__2": input_mask} y_real = { "output__0": np.zeros([batch_size, max_seq_length], dtype=dtype), "output__1": np.zeros([batch_size, max_seq_length], dtype=dtype), } yield (unique_id, x, y_real) return _get_dataloader
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/dataloader.py
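With `pad_to_batch_size=True`, the dataloader above fills the last, shorter batch by repeating the final feature so every batch has exactly `batch_size` elements. A small worked sketch of that arithmetic, including the edge case it implies:

```python
# Pad-to-batch-size arithmetic from get_dataloader_fn above.
batch_size = 8
num_features = 10570                          # illustrative feature count

padding = batch_size - (num_features % batch_size)
print(padding)                                # 10570 % 8 == 2, so 6 duplicates are appended
print((num_features + padding) % batch_size)  # 0 -> every batch is now exactly full

# Edge case: if num_features is already a multiple of batch_size, padding equals
# batch_size, i.e. one whole extra batch of duplicated features gets appended.
```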
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import csv import logging import os import pathlib import shutil from distutils.version import LooseVersion from enum import Enum from importlib.metadata import version from typing import Any, Dict, List import yaml # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .deployment_toolkit.core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool from .deployment_toolkit.model_analyzer import ModelAnalyzer, ModelAnalyzerConfig, ModelAnalyzerMode from .deployment_toolkit.perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig from .deployment_toolkit.report import save_results, show_results, sort_results from .deployment_toolkit.utils import parse_server_url from .deployment_toolkit.warmup import performance_evaluation_warmup LOGGER = logging.getLogger("run_performance_on_triton") TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient")) def _log_dict(title: str, dict_: Dict[str, Any]): LOGGER.info(title) for key, value in dict_.items(): LOGGER.info(f"\t{key} = {value}") def _calculate_average_latency(r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields]) return avg_latency def _update_performance_data(results: List, batch_size: int, performance_partial_file: str): row: Dict = {"Batch": batch_size} with open(performance_partial_file) as csvfile: reader = csv.DictReader(csvfile) for r in reader: avg_latency = _calculate_average_latency(r) row = {**row, **r, "avg latency": avg_latency} results.append(row) def _model_analyzer_evaluation( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, model_repository: str, result_path: str, output_shared_memory_size: int = 102400, verbose: bool = False, ): _log_dict( "Selected configuration", { "server_url": server_url, "model_name": model_name, "input_data": input_data, "input_shapes": input_shapes, "batch_sizes": batch_sizes, "number_of_triton_instances": number_of_triton_instances, "number_of_model_instances": number_of_model_instances, "measurement_mode": measurement_mode, "measurement_interval": measurement_interval, "measurement_request_count": measurement_request_count, "concurrency_steps": concurrency_steps, "batching_mode": batching_mode, "evaluation_mode": evaluation_mode, "offline_mode": offline_mode, "output_shared_memory_size": output_shared_memory_size, "model_repository": 
model_repository, "result_path": result_path, "verbose": verbose, }, ) perf_analyzer_config = { "input-data": input_data, "measurement-interval": measurement_interval, } if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): perf_analyzer_config["measurement-mode"] = measurement_mode.value perf_analyzer_config["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: perf_analyzer_config["shared-memory"] = offline_mode.value perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size if input_shapes: perf_analyzer_config["shape"] = input_shapes[0] LOGGER.warning("Model Analyzer support only single shape param for Perf Analyzer.") if batching_mode == BatchingMode.STATIC: batch_sizes = batch_sizes concurrency = [number_of_triton_instances] elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // concurrency_steps) min_concurrency = step concurrency = {"start": min_concurrency, "stop": max_concurrency, "step": step} batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") protocol, host, port = parse_server_url(server_url) checkpoints = pathlib.Path("./checkpoints") if checkpoints.is_dir(): shutil.rmtree(checkpoints.as_posix()) checkpoints.mkdir(parents=True, exist_ok=True) config = { "model_repository": model_repository, "triton_launch_mode": "remote", "run_config_search_disable": True, "perf_analyzer_flags": perf_analyzer_config, "perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h "profile_models": [model_name], "batch_sizes": batch_sizes, "concurrency": concurrency, "verbose": verbose, "checkpoint_directory": checkpoints.as_posix(), "override_output_model_repository": True, "client_protocol": protocol, f"triton_{protocol}_endpoint": f"{host}:{port}", } if verbose: _log_dict("Model Analyzer profiling configuration", config) with open("config.yaml", "w") as file: yaml.safe_dump(config, file) config = ModelAnalyzerConfig() model_analyzer = ModelAnalyzer(config=config) model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=verbose) result_path = pathlib.Path(result_path) result_path.mkdir(parents=True, exist_ok=True) for file in checkpoints.iterdir(): if not file.is_file() or file.suffix != ".ckpt": continue LOGGER.info(f"Moving checkpoint {file.name} to {result_path}") shutil.move(file, result_path / file.name) inference_output_fields = [ "batch_size", "concurrency", "perf_throughput", "perf_latency", "perf_client_send_recv", "perf_client_response_wait", "perf_server_queue", "perf_server_compute_input", "perf_server_compute_infer", "perf_server_compute_output", ] gpu_output_fields = [ "gpu_uuid", "batch_size", "concurrency", "gpu_used_memory", "gpu_free_memory", "gpu_utilization", "gpu_power_usage", ] filename_model_inference = "metrics-model-inference.csv" filename_model_gpu = "metrics-model-gpu.csv" config = { "analysis_models": model_name, "checkpoint_directory": result_path.as_posix(), "export_path": "/tmp", "inference_output_fields": inference_output_fields, "gpu_output_fields": gpu_output_fields, "filename_model_inference": filename_model_inference, "filename_model_gpu": filename_model_gpu, "summarize": False, } if verbose: _log_dict("Model Analyzer analysis configuration", config) with open("config.yaml", "w") as file: 
yaml.safe_dump(config, file) config = ModelAnalyzerConfig() model_analyzer = ModelAnalyzer(config=config) model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=verbose) inference_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_inference gpu_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_gpu for file in [inference_metrics_file, gpu_metrics_file]: LOGGER.info(f"Moving metrics {file.name} to {result_path}") shutil.move(file, result_path / file.name) def _perf_analyzer_evaluation( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, result_path: str, output_shared_memory_size: int = 102400, verbose: bool = False, ): protocol, host, port = parse_server_url(server_url) if batching_mode == BatchingMode.STATIC: batch_sizes = batch_sizes max_concurrency = 1 min_concurrency = 1 step = 1 elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // concurrency_steps) min_concurrency = step batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") _log_dict( "Selected configuration", { "server_url": server_url, "model_name": model_name, "input_data": input_data, "input_shapes": input_shapes, "batch_sizes": batch_sizes, "number_of_triton_instances": number_of_triton_instances, "number_of_model_instances": number_of_model_instances, "measurement_mode": measurement_mode, "measurement_interval": measurement_interval, "measurement_request_count": measurement_request_count, "concurrency_steps": concurrency_steps, "batching_mode": batching_mode, "evaluation_mode": evaluation_mode, "offline_mode": offline_mode, "output_shared_memory_size": output_shared_memory_size, "result_path": result_path, "verbose": verbose, }, ) results: List[Dict] = list() for batch_size in batch_sizes: for concurrency in range(min_concurrency, max_concurrency + step, step): performance_partial_file = f"triton_performance_{evaluation_mode.value.lower()}_{batching_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv" params = { "model-name": model_name, "model-version": 1, "batch-size": batch_size, "url": f"{host}:{port}", "protocol": protocol, "input-data": input_data, "measurement-interval": measurement_interval, "concurrency-range": f"{concurrency}:{concurrency}:1", "latency-report-file": performance_partial_file, } if verbose: params["extra-verbose"] = True if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): params["measurement-mode"] = measurement_mode.value params["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: params["shared-memory"] = offline_mode.value params["output-shared-memory-size"] = output_shared_memory_size if verbose: _log_dict(f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params) config = PerfAnalyzerConfig() for param, value in params.items(): config[param] = value for shape in input_shapes: config["shape"] = shape perf_analyzer = PerfAnalyzer(config=config) perf_analyzer.run() _update_performance_data(results, 
batch_size, performance_partial_file) os.remove(performance_partial_file) results = sort_results(results=results) save_results(filename=result_path, data=results) show_results(results=results) def _run_performance_analysis( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, output_shared_memory_size: int, performance_tool: PerformanceTool, model_repository: str, result_path: str, warmup: bool, verbose: bool, ): log_level = logging.INFO if not verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) if warmup: LOGGER.info("Running warmup before the main test") performance_evaluation_warmup( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, ) if performance_tool == PerformanceTool.MODEL_ANALYZER: LOGGER.info("Using Model Analyzer for performance evaluation") _model_analyzer_evaluation( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, concurrency_steps=concurrency_steps, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, model_repository=model_repository, result_path=result_path, verbose=verbose, ) elif performance_tool == PerformanceTool.PERF_ANALYZER: LOGGER.info("Using Perf Analyzer for performance evaluation") _perf_analyzer_evaluation( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, concurrency_steps=concurrency_steps, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, result_path=result_path, verbose=verbose, ) else: raise ValueError(f"Unsupported performance tool {performance_tool}") class MeasurementMode(Enum): """ Available measurement stabilization modes """ COUNT_WINDOWS = "count_windows" TIME_WINDOWS = "time_windows" def main(): parser = argparse.ArgumentParser() parser.add_argument( "--server-url", type=str, required=False, default="grpc://127.0.0.1:8001", help="Url to Triton server", ) parser.add_argument( "--model-name", type=str, required=True, help="Name of the model to test", ) parser.add_argument( "--input-data", type=str, required=False, 
default="random", help="Input data to perform profiling.", ) parser.add_argument( "--input-shapes", action="append", required=False, help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.", ) parser.add_argument( "--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.", ) parser.add_argument( "--number-of-triton-instances", type=int, default=1, help="Number of Triton Server instances", ) parser.add_argument( "--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server", ) parser.add_argument( "--measurement-mode", choices=[item.value for item in MeasurementMode], default=MeasurementMode.COUNT_WINDOWS.value, type=str, help="Select measurement mode " "'time_windows' stabilize performance on measurement window. " "'count_windows' stabilize performance on number of samples.", ) parser.add_argument( "--measurement-interval", required=False, help="Time window perf_analyzer will wait to stabilize the measurement", default=5000, type=int, ) parser.add_argument( "--measurement-request-count", required=False, help="Number of samples on which perf_analyzer will stabilize the measurement", default=50, type=int, ) parser.add_argument( "--concurrency-steps", help="Define number of concurrency steps used for dynamic batching tests", default=32, type=int, ) parser.add_argument( "--batching-mode", choices=[item.value for item in BatchingMode], default=BatchingMode.STATIC.value, type=str, help="Select batching mode " "'static' run static batching scenario. " "'dynamic' run dynamic batching scenario.", ) parser.add_argument( "--evaluation-mode", choices=[item.value for item in EvaluationMode], default=EvaluationMode.OFFLINE.value, type=str, help="Select evaluation mode " "'offline' run offline analysis and use GPU memory to pass tensors. " "'online' run online analysis and use HTTP protocol.", ) parser.add_argument( "--offline-mode", choices=[item.value for item in OfflineMode], default=OfflineMode.SYSTEM.value, type=str, help="Select offline mode " "'system' pass tensors through CPU RAM memory. " "'cuda' pass tensors through GPU RAM memory.", ) parser.add_argument( "--output-shared-memory-size", default=100240, type=int, help="Size of memory buffer allocated for output with dynamic shapes in bytes. " "Has to be equal to maximal size of output tensor.", ) parser.add_argument( "--performance-tool", choices=[item.value for item in PerformanceTool], default=PerformanceTool.MODEL_ANALYZER.value, type=str, help="Select performance tool for measurement mode " "'model_analyzer' use Model Analyzer " "'perf_analyzer' use Perf Analyzer", ) parser.add_argument( "--model-repository", default=None, type=str, help="Path to model repository. 
Valid when using Model Analyzer", ) parser.add_argument("--result-path", type=str, required=True, help="Path where results files is stored.") parser.add_argument( "--warmup", help="Enable model warmup before performance test", action="store_true", default=False ) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) args = parser.parse_args() batch_sizes = list(map(lambda x: int(x), args.batch_sizes.split(","))) _run_performance_analysis( server_url=args.server_url, model_name=args.model_name, input_data=args.input_data, input_shapes=args.input_shapes or [], batch_sizes=batch_sizes, number_of_triton_instances=args.number_of_triton_instances, number_of_model_instances=args.number_of_model_instances, measurement_mode=MeasurementMode(args.measurement_mode), measurement_interval=args.measurement_interval, measurement_request_count=args.measurement_request_count, concurrency_steps=args.concurrency_steps, batching_mode=BatchingMode(args.batching_mode), evaluation_mode=EvaluationMode(args.evaluation_mode), offline_mode=OfflineMode(args.offline_mode), output_shared_memory_size=args.output_shared_memory_size, performance_tool=PerformanceTool(args.performance_tool), model_repository=args.model_repository, result_path=args.result_path, warmup=args.warmup, verbose=args.verbose, ) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/run_performance_on_triton.py
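For the dynamic-batching scenario, both evaluation paths above derive the concurrency sweep from the same arithmetic. A worked example, assuming max batch size 8, one Triton instance, one model instance, and 32 concurrency steps (example values, not measured settings):

```python
# Concurrency sweep arithmetic used for the dynamic-batching scenario above
# (the parameter values are examples, not measured settings).
max_batch_size = 8
number_of_triton_instances = 1
number_of_model_instances = 1
concurrency_steps = 32

max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances  # 16
max_concurrency = min(256, max_total_requests)        # 16
step = max(1, max_concurrency // concurrency_steps)   # 16 // 32 == 0, clamped to 1
min_concurrency = step                                 # 1
batch_sizes = [max(1, max_total_requests // 256)]      # [1]

print(list(range(min_concurrency, max_concurrency + step, step)))  # concurrencies 1..16
```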
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...runner.pipeline import Pipeline pipeline = Pipeline() pipeline.model_export( commands=( r""" if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi if [[ "${EXPORT_FORMAT}" == "trt" ]]; then export FLAG="--fixed-batch-dim" else export FLAG="" fi python3 triton/export_model.py \ --input-path triton/model.py \ --input-type pyt \ --output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-type ${EXPORT_FORMAT} \ --dataloader triton/dataloader.py \ --ignore-unknown-parameters \ --onnx-opset 13 \ ${FLAG} \ \ --config-file bert_configs/large.json \ --checkpoint ${CHECKPOINT_DIR}/bert_large_qa.pt \ --precision ${EXPORT_PRECISION} \ \ --vocab-file ${DATASETS_DIR}/data/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt \ --max-seq-length ${MAX_SEQ_LENGTH} \ --predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \ --batch-size ${MAX_BATCH_SIZE} """, ) ) pipeline.model_conversion( commands=( r""" if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi if [ "${EXPORT_FORMAT}" != "${FORMAT}" ]; then model-navigator convert \ --model-name ${MODEL_NAME} \ --model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-path ${SHARED_DIR}/converted_model \ --target-formats ${FORMAT} \ --target-precisions ${PRECISION} \ --launch-mode local \ --override-workspace \ --verbose \ \ --onnx-opsets 13 \ --inputs input__0:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \ --inputs input__1:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \ --inputs input__2:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \ --min-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ --max-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ --opt-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ --max-batch-size ${MAX_BATCH_SIZE} \ --tensorrt-max-workspace-size 8589934592 \ --atol 2 output__0=5.0 \ output__1=5.0 \ --rtol 1 output__0=5.0 \ output__1=5.0 \ | grep -v "broadcasting input1 to make tensors conform" else mv ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} ${SHARED_DIR}/converted_model mv ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX}.yaml ${SHARED_DIR}/converted_model.yaml 2>/dev/null || true fi """, ) ) pipeline.model_deploy( commands=( r""" if [[ "${FORMAT}" == "ts-trace" || "${FORMAT}" == "ts-script" ]]; then export CONFIG_FORMAT="torchscript" else export CONFIG_FORMAT="${FORMAT}" fi if [[ "${FORMAT}" == "trt" 
]]; then export MBS="0" else export MBS="${MAX_BATCH_SIZE}" fi model-navigator triton-config-model \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --model-version 1 \ --model-path ${SHARED_DIR}/converted_model \ --model-format ${CONFIG_FORMAT} \ --model-control-mode ${TRITON_LOAD_MODEL_METHOD} \ --verbose \ --load-model \ --load-model-timeout-s 100 \ \ --backend-accelerator ${ACCELERATOR} \ --tensorrt-precision ${ACCELERATOR_PRECISION} \ --max-batch-size ${MBS} \ --preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \ --max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \ --engine-count-per-device gpu=${TRITON_GPU_ENGINE_COUNT} """, ) ) pipeline.triton_prepare_performance_profiling_data( commands=( r""" mkdir -p ${SHARED_DIR}/input_data """, r""" python triton/prepare_input_data.py \ --dataloader triton/dataloader.py \ --input-data-dir ${SHARED_DIR}/input_data \ \ --batch-size ${MAX_BATCH_SIZE} \ --max-seq-length ${MAX_SEQ_LENGTH} \ --predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \ --vocab-file ${DATASETS_DIR}/data/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt """, ) ) pipeline.triton_performance_offline_tests( commands=( r""" python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data ${SHARED_DIR}/input_data/data.json \ --input-shapes input__0:${MAX_SEQ_LENGTH} \ --input-shapes input__1:${MAX_SEQ_LENGTH} \ --input-shapes input__2:${MAX_SEQ_LENGTH} \ --batch-sizes ${BATCH_SIZE} \ --number-of-triton-instances ${TRITON_INSTANCES} \ --number-of-model-instances ${TRITON_GPU_ENGINE_COUNT} \ --batching-mode static \ --evaluation-mode offline \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_offline.csv """, ), result_path="${SHARED_DIR}/triton_performance_offline.csv", )
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/large/runner/pipeline_impl.py
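The pipeline above registers shell command blocks per stage (export, conversion, deployment, data preparation, performance tests). The following is a hypothetical, simplified stand-in that shows the shape of such a stage registry; the real `runner.pipeline.Pipeline` API may differ.

```python
# Hypothetical, simplified stand-in for a stage -> shell-commands pipeline;
# the real runner.pipeline.Pipeline API may differ from this sketch.
import subprocess
from typing import Dict, Tuple


class MiniPipeline:
    def __init__(self):
        self._stages: Dict[str, Tuple[str, ...]] = {}

    def add_stage(self, name: str, commands: Tuple[str, ...]) -> None:
        self._stages[name] = commands

    def run(self) -> None:
        for name, commands in self._stages.items():
            for command in commands:
                print(f"[{name}] running shell block")
                subprocess.run(command, shell=True, check=True, executable="/bin/bash")


pipeline = MiniPipeline()
pipeline.add_stage("model_export", (r"""echo "export step would run here" """,))
pipeline.add_stage("model_deploy", (r"""echo "deploy step would run here" """,))
# pipeline.run()  # executes each stage's commands in registration order
```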
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import pathlib from typing import List if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...runner.config import Config from ...runner.executor import Executor from ...runner.finalizer import ExperimentFinalizer from ...runner.maintainer import DockerMaintainer from ...runner.preparer import ExperimentPreparer from ...runner.runner_proxy import RunnerProxy from .pipeline_impl import pipeline class ExperimentRunner(RunnerProxy): """ Experiment Runner proxy for runner wrapper """ maintainer_cls = DockerMaintainer executor_cls = Executor preparer_cls = ExperimentPreparer finalizer_cls = ExperimentFinalizer def execute(config_path: str, devices: List[str]): if len(devices) == 0: devices = ["0"] config = Config.from_file(config_path) runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices) runner.start() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.") parser.add_argument( "--devices", type=str, nargs="*", required=False, help="Path to configuration file with details." ) args = parser.parse_args() config_path = args.config_path devices = args.devices execute(config_path, devices)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/large/runner/__main__.py
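The entry point above is meant to fall back to GPU 0 when no devices are passed. A small argparse sketch of that fallback (using `or` instead of the script's `len()` check so an omitted flag is handled as well):

```python
import argparse

# The --devices flag takes zero or more ids; an empty or omitted list falls back to GPU 0.
parser = argparse.ArgumentParser()
parser.add_argument("--devices", type=str, nargs="*", required=False, help="Device ids to run on")

args = parser.parse_args(["--devices", "0", "1"])
devices = args.devices or ["0"]
print(devices)   # ['0', '1']

args = parser.parse_args([])
devices = args.devices or ["0"]
print(devices)   # ['0'] -> default device
```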
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import importlib import logging import os from enum import Enum from pathlib import Path from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np LOGGER = logging.getLogger(__name__) DATALOADER_FN_NAME = "get_dataloader_fn" GET_MODEL_FN_NAME = "get_model" GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn" GET_ARGPARSER_FN_NAME = "update_argparser" class TensorSpec(NamedTuple): name: str dtype: str shape: Tuple class Parameter(Enum): def __lt__(self, other: "Parameter") -> bool: return self.value < other.value def __str__(self): return self.value class Accelerator(Parameter): NONE = "none" AMP = "amp" TRT = "trt" CUDA = NONE # backward compatibility class Precision(Parameter): INT8 = "int8" FP16 = "fp16" FP32 = "fp32" TF32 = "tf32" # Deprecated class Format(Parameter): TF_GRAPHDEF = "tf-graphdef" TF_SAVEDMODEL = "tf-savedmodel" TF_TRT = "tf-trt" TF_ESTIMATOR = "tf-estimator" TF_KERAS = "tf-keras" ONNX = "onnx" TRT = "trt" TS_SCRIPT = "ts-script" TS_TRACE = "ts-trace" PYT = "pyt" FASTERTRANSFORMER = "fastertransformer" class Model(NamedTuple): handle: object # TODO: precision should be removed precision: Optional[Precision] inputs: Dict[str, TensorSpec] outputs: Dict[str, TensorSpec] def load_from_file(file_path, label, target): spec = importlib.util.spec_from_file_location(name=label, location=file_path) my_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(my_module) # pytype: disable=attribute-error return getattr(my_module, target, None) class BaseLoader(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def load(self, model_path: Union[str, Path], **kwargs) -> Model: """ Loads and process model from file based on given set of args """ pass class BaseSaver(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: """ Save model to file """ pass class BaseRunner(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def init_inference(self, model: Model): raise NotImplementedError class BaseRunnerSession(abc.ABC): def __init__(self, model: Model): self._model = model @abc.abstractmethod def __enter__(self): raise NotImplementedError() @abc.abstractmethod def __exit__(self, exc_type, exc_value, traceback): raise NotImplementedError() @abc.abstractmethod def __call__(self, x: Dict[str, object]): raise NotImplementedError() def _set_env_variables(self) -> Dict[str, object]: """this method not remove values; fix it if needed""" to_set = {} old_values = {k: os.environ.pop(k, None) for k in to_set} os.environ.update(to_set) return old_values def _recover_env_variables(self, old_envs: Dict[str, object]): for name, value in old_envs.items(): if value is None: del os.environ[name] else: os.environ[name] = str(value) class 
BaseConverter(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def convert(self, model: Model, dataloader_fn) -> Model: raise NotImplementedError() @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: return requested_model_precision class BaseMetricsCalculator(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None def calc( self, *, ids: List[Any], y_pred: Dict[str, np.ndarray], x: Optional[Dict[str, np.ndarray]], y_real: Optional[Dict[str, np.ndarray]], ) -> Dict[str, float]: """ Calculates error/accuracy metrics Args: ids: List of ids identifying each sample in the batch y_pred: model output as dict where key is output name and value is output value x: model input as dict where key is input name and value is input value y_real: input ground truth as dict where key is output name and value is output value Returns: dictionary where key is metric name and value is its value """ pass @abc.abstractmethod def update( self, ids: List[Any], y_pred: Dict[str, np.ndarray], x: Optional[Dict[str, np.ndarray]], y_real: Optional[Dict[str, np.ndarray]], ): pass @property @abc.abstractmethod def metrics(self) -> Dict[str, Any]: pass class ShapeSpec(NamedTuple): min: Tuple opt: Tuple max: Tuple class MeasurementMode(Enum): COUNT_WINDOWS = "count_windows" TIME_WINDOWS = "time_windows" class PerformanceTool(Enum): """ Available performance evaluation tools """ MODEL_ANALYZER = "model_analyzer" PERF_ANALYZER = "perf_analyzer" class BatchingMode(Enum): """ Available batching modes """ STATIC = "static" DYNAMIC = "dynamic" class EvaluationMode(Enum): """ Available evaluation modes """ OFFLINE = "offline" ONLINE = "online" class OfflineMode(Enum): SYSTEM = "system" CUDA = "cuda"
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/core.py
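The core.py module above only declares abstract interfaces; the following is a minimal sketch of a metrics calculator built on that interface. The import path, tensor names, and the TopTokenAccuracy class are assumptions for illustration, not part of the toolkit.

from typing import Any, Dict, List, Optional

import numpy as np

from triton.deployment_toolkit.core import BaseMetricsCalculator  # path assumes BERT dir on PYTHONPATH


class TopTokenAccuracy(BaseMetricsCalculator):
    """Toy metric: exact-match accuracy between argmax of predictions and labels."""

    def __init__(self):
        self._correct = 0
        self._total = 0

    def update(
        self,
        ids: List[Any],
        y_pred: Dict[str, np.ndarray],
        x: Optional[Dict[str, np.ndarray]],
        y_real: Optional[Dict[str, np.ndarray]],
    ):
        # "output__0" / "labels" are placeholder tensor names; real BERT outputs differ
        pred = np.argmax(y_pred["output__0"], axis=-1)
        real = y_real["labels"]
        self._correct += int((pred == real).sum())
        self._total += int(real.size)

    @property
    def metrics(self) -> Dict[str, Any]:
        return {"accuracy": self._correct / max(self._total, 1)}


calculator = TopTokenAccuracy()
calculator.update(
    ids=[0, 1],
    y_pred={"output__0": np.array([[0.1, 0.9], [0.8, 0.2]], dtype=np.float32)},
    x=None,
    y_real={"labels": np.array([1, 0])},
)
print(calculator.metrics)  # {'accuracy': 1.0}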
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import json import pickle import threading from pathlib import Path from typing import Dict, Iterator, List, Union import numpy as np MB2B = 2 ** 20 B2MB = 1 / MB2B FLUSH_THRESHOLD_B = 256 * MB2B def _validate_batch(name: str, value: Union[list, np.ndarray]): if not isinstance(value, (list, np.ndarray)): raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}") def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]): batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()} names = list(batch_sizes_per_io_name) for io_name in names: for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]): if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]): non_equal_batch_sizes = { other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names } non_equal_batch_sizes_str = ", ".join( [f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()] ) raise ValueError( "All inputs/outputs should have same number of batches with equal batch_size. " f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}" ) # ensure if each io has same number of batches with equal size def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]): nitems = 0 nbatches = 0 if prefix_data: nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()} nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()} nitems = list(nitems_per_io_name.values())[0] nbatches = list(nbatches_per_io_name.values())[0] return nitems, nbatches class BaseDumpWriter(abc.ABC): FILE_SUFFIX = ".abstract" def __init__(self, output_dir: Union[str, Path]): self._output_dir = Path(output_dir) # outer dict key is prefix (i.e. 
input/output/labels/...), inner dict key is input/output name # list is list of batches self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {} # key is prefix self._items_counters: Dict[str, int] = {} self._cache_lock = threading.RLock() self._flush_threshold_b = FLUSH_THRESHOLD_B @property def cache_size(self): def _get_bytes_size(name, batch): _validate_batch(name, batch) if not isinstance(batch, np.ndarray): batch = np.narray(batch) return batch.nbytes with self._cache_lock: return { prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches) for prefix, data in self._items_cache.items() } def _append_to_cache(self, prefix, prefix_data): if prefix_data is None: return if not isinstance(prefix_data, dict): raise ValueError(f"{prefix} data to store shall be dict") with self._cache_lock: cached_prefix_data = self._items_cache.setdefault(prefix, {}) for name, batch in prefix_data.items(): _validate_batch(name, batch) if not isinstance(batch, np.ndarray): batch = np.array(batch) cached_batches = cached_prefix_data.setdefault(name, []) cached_batches += [batch] def write(self, **kwargs): with self._cache_lock: for prefix, prefix_data in kwargs.items(): self._append_to_cache(prefix, prefix_data) biggest_prefix_data_size = max(self.cache_size.values()) if biggest_prefix_data_size > self._flush_threshold_b: self.flush() def flush(self): with self._cache_lock: for prefix, prefix_data in self._items_cache.items(): _validate_prefix_data(prefix_data) output_path = self._output_dir / self._get_filename(prefix) self._dump(prefix_data, output_path) nitems, nbatches = _get_nitems_and_batches(prefix_data) self._items_counters[prefix] += nitems self._items_cache = {} def _get_filename(self, prefix): idx = self._items_counters.setdefault(prefix, 0) return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}" @abc.abstractmethod def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): pass def __enter__(self): if self._output_dir.exists() and len(list(self._output_dir.iterdir())): raise ValueError(f"{self._output_dir.as_posix()} is not empty") self._output_dir.mkdir(parents=True, exist_ok=True) return self def __exit__(self, exc_type, exc_val, exc_tb): self.flush() class PickleDumpWriter(BaseDumpWriter): FILE_SUFFIX = ".pkl" def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("wb") as pickle_file: pickle.dump(prefix_data, pickle_file) class JsonDumpWriter(BaseDumpWriter): FILE_SUFFIX = ".json" def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): repacked_prefix_data = self._format_data(prefix_data) output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("w") as json_file: json.dump(repacked_prefix_data, json_file) def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict: def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray): return { "content": batch.flatten().tolist(), "shape": list(batch.shape), "dtype": str(batch.dtype), } _, nbatches = _get_nitems_and_batches(prefix_data) batches = [{} for _ in range(nbatches)] for io_name, batches_per_io in prefix_data.items(): for batch_idx, batch in enumerate(batches_per_io): batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch) return {"data": batches} class BaseDumpReader(abc.ABC): FILE_SUFFIX = ".abstract" def __init__(self, dump_dir: Union[Path, str]): self._dump_dir = Path(dump_dir) def get(self, prefix: str) 
-> Iterator[Dict[str, np.ndarray]]: dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}")) for dump_file_path in dump_files_paths: prefix_data = self._load_file(dump_file_path) nitems, nbatches = _get_nitems_and_batches(prefix_data) for batch_idx in range(nbatches): yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data} @abc.abstractmethod def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: pass def iterate_over(self, prefix_list: List[str]) -> Iterator: iterators = [self.get(prefix) for prefix in prefix_list] empty_iterators = [False] * len(iterators) while not all(empty_iterators): values = [None] * len(iterators) for idx, iterator in enumerate(iterators): if empty_iterators[idx]: continue try: values[idx] = next(iterator) except StopIteration: empty_iterators[idx] = True if all(empty_iterators): break if not all(empty_iterators): yield values def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class PickleDumpReader(BaseDumpReader): FILE_SUFFIX = ".pkl" def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: with dump_file_path.open("rb") as pickle_file: return pickle.load(pickle_file) class JsonDumpReader(BaseDumpReader): FILE_SUFFIX = ".json" def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: with dump_file_path.open("rb") as json_file: data = json.load(json_file) return self._repack_data(data) def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]: result: Dict[str, List[np.ndarray]] = {} batches = data["data"] for batch in batches: for io_name, batch_as_dict in batch.items(): io_batches = result.setdefault(io_name, []) flat_array = batch_as_dict["content"] shape = batch_as_dict["shape"] dtype = batch_as_dict["dtype"] batch_as_array = np.array(flat_array).reshape(shape).astype(dtype) io_batches.append(batch_as_array) return result
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/dump.py
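A small round-trip sketch for the dump writer/reader classes above. The directory name and tensor names are placeholders, and the target directory must be empty or absent, since the writer refuses to reuse a non-empty directory.

import numpy as np

from triton.deployment_toolkit.dump import JsonDumpReader, JsonDumpWriter

with JsonDumpWriter("./dump_example") as writer:
    # each value is one batch (ndarray); prefixes are arbitrary names such as "inputs"/"outputs"
    writer.write(
        inputs={"input__0": np.zeros((2, 384), dtype=np.int64)},
        outputs={"output__0": np.zeros((2, 2), dtype=np.float32)},
    )
# flush() runs on context exit and writes inputs-*.json / outputs-*.json files

with JsonDumpReader("./dump_example") as reader:
    for inputs_batch, outputs_batch in reader.iterate_over(["inputs", "outputs"]):
        print({name: batch.shape for name, batch in inputs_batch.items()})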
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import logging
import os
import re
from pathlib import Path
from typing import List

LOGGER = logging.getLogger(__name__)


class ExtensionManager:
    def __init__(self, name: str):
        self._name = name
        self._registry = {}

    def register_extension(self, extension: str, clazz):
        already_registered_class = self._registry.get(extension, None)
        if already_registered_class and already_registered_class.__module__ != clazz.__module__:
            raise RuntimeError(
                f"Conflicting extension {self._name}/{extension}; "
                f"{already_registered_class.__module__}.{already_registered_class.__name__} "
                f"and "
                f"{clazz.__module__}.{clazz.__name__}"
            )
        elif already_registered_class is None:
            clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
            LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
            self._registry[extension] = clazz

    def get(self, extension):
        if extension not in self._registry:
            raise RuntimeError(f"Missing extension {self._name}/{extension}")
        return self._registry[extension]

    @property
    def supported_extensions(self):
        return list(self._registry)

    @staticmethod
    def scan_for_extensions(extension_dirs: List[Path]):
        register_pattern = r".*\.register_extension\(.*"

        for extension_dir in extension_dirs:
            for python_path in extension_dir.rglob("*.py"):
                if not python_path.is_file():
                    continue
                payload = python_path.read_text()
                if re.findall(register_pattern, payload):
                    import_path = python_path.relative_to(toolkit_root_dir.parent)
                    package = import_path.parent.as_posix().replace(os.sep, ".")
                    package_with_module = f"{package}.{import_path.stem}"
                    spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
                    my_module = importlib.util.module_from_spec(spec)
                    my_module.__package__ = package

                    try:
                        spec.loader.exec_module(my_module)  # pytype: disable=attribute-error
                    except ModuleNotFoundError as e:
                        LOGGER.error(
                            f"Could not load extensions from {import_path} due to missing python packages; {e}"
                        )


runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/extensions.py
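A sketch of how a new extension could be registered with the managers defined above. EchoRunner and the "echo" format name are made up for illustration; importing the extensions module also triggers the directory scan, which only logs an error when optional dependencies are missing.

from triton.deployment_toolkit.core import BaseRunner, Model
from triton.deployment_toolkit.extensions import runners


class EchoRunner(BaseRunner):
    """Toy runner used only to illustrate the extension registry."""

    def init_inference(self, model: Model):
        raise NotImplementedError("illustrative only")


runners.register_extension("echo", EchoRunner)
print(runners.supported_extensions)  # includes "echo" next to the built-in formats
print(runners.get("echo"))           # <class '...EchoRunner'>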
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pathlib from distutils.version import LooseVersion from importlib.metadata import version from typing import List TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient")) # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode from .perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig from .utils import parse_server_url LOGGER = logging.getLogger("warmup") def performance_evaluation_warmup( server_url: str, model_name: str, batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, input_data: str, input_shapes: List[str], measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, batching_mode: BatchingMode, offline_mode: OfflineMode, evaluation_mode: EvaluationMode, output_shared_memory_size: int, ): protocol, host, port = parse_server_url(server_url) measurement_interval = 2 * measurement_interval measurement_request_count = 2 * measurement_request_count if batching_mode == BatchingMode.STATIC: batch_sizes = sorted({1, batch_sizes[-1]}) max_concurrency = 1 min_concurrency = 1 step = 1 elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // 2) min_concurrency = step batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") for batch_size in batch_sizes: for concurrency in range(min_concurrency, max_concurrency + step, step): params = { "model-name": model_name, "model-version": 1, "batch-size": batch_size, "url": f"{host}:{port}", "protocol": protocol, "input-data": input_data, "measurement-interval": measurement_interval, "concurrency-range": f"{concurrency}:{concurrency}:1", "output-shared-memory-size": output_shared_memory_size, } if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): params["measurement-mode"] = measurement_mode.value params["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: params["shared-memory"] = offline_mode.value params["output-shared-memory-size"] = output_shared_memory_size config = PerfAnalyzerConfig() for param, value in params.items(): config[param] = value for shape in input_shapes: config["shape"] = shape perf_analyzer = PerfAnalyzer(config=config) perf_analyzer.run()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/warmup.py
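A hedged call sketch for the warmup helper above. It assumes a Triton server is already serving the model at the given URL; every value below (model name, shapes, intervals) is a placeholder chosen only to match the function signature.

from triton.deployment_toolkit.core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode
from triton.deployment_toolkit.warmup import performance_evaluation_warmup

performance_evaluation_warmup(
    server_url="grpc://127.0.0.1:8001",
    model_name="BERT",                                   # placeholder
    batch_sizes=[1, 8, 16],
    number_of_triton_instances=1,
    number_of_model_instances=1,
    input_data="input_data/data.json",                   # placeholder perf_analyzer input file
    input_shapes=["input__0:384", "input__1:384", "input__2:384"],
    measurement_mode=MeasurementMode.COUNT_WINDOWS,
    measurement_interval=5000,
    measurement_request_count=50,
    batching_mode=BatchingMode.STATIC,
    offline_mode=OfflineMode.SYSTEM,
    evaluation_mode=EvaluationMode.OFFLINE,
    output_shared_memory_size=102400,
)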
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import Tuple

LOGGER = logging.getLogger(__name__)


def parse_server_url(server_url: str) -> Tuple[str, str, int]:
    DEFAULT_PORTS = {"http": 8000, "grpc": 8001}

    # extract protocol
    server_url_items = server_url.split("://")
    if len(server_url_items) != 2:
        raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
    requested_protocol, server_url = server_url_items
    requested_protocol = requested_protocol.lower()

    if requested_protocol not in DEFAULT_PORTS:
        raise ValueError(f"Unsupported protocol: {requested_protocol}")

    # extract host and port
    default_port = DEFAULT_PORTS[requested_protocol]
    server_url_items = server_url.split(":")
    if len(server_url_items) == 1:
        host, port = server_url, default_port
    elif len(server_url_items) == 2:
        host, port = server_url_items
        port = int(port)
        if port != default_port:
            LOGGER.warning(
                f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
            )
    else:
        raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")

    return requested_protocol, host, port
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/utils.py
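Two quick examples of what parse_server_url above returns; the expected tuples are shown in comments.

from triton.deployment_toolkit.utils import parse_server_url

print(parse_server_url("grpc://127.0.0.1:8001"))  # ('grpc', '127.0.0.1', 8001)
print(parse_server_url("http://localhost"))       # ('http', 'localhost', 8000) - default port filled in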
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import inspect import logging from typing import Callable, Dict, Optional, Union from model_navigator.utils.cli import is_dict_generic, is_list_generic, is_optional_generic from .core import GET_ARGPARSER_FN_NAME, load_from_file LOGGER = logging.getLogger(__name__) def str2bool(v): if isinstance(v, bool): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("Boolean value expected.") def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict: signature = inspect.signature(fn) parameters_names = list(signature.parameters) if isinstance(args, argparse.Namespace): args = vars(args) args = {k: v for k, v in args.items() if k in parameters_names} return args def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser: parser.conflict_handler = "resolve" signature = inspect.signature(fn) for parameter in signature.parameters.values(): if parameter.name in ["self", "args", "kwargs"]: continue argument_kwargs = {} if parameter.annotation != inspect.Parameter.empty: is_optional = is_optional_generic(parameter.annotation) if is_optional: annotation = parameter.annotation.__args__[0] # Optional[cls] will be changed into Union[cls, None] else: annotation = parameter.annotation is_list = is_list_generic(annotation) is_dict = is_dict_generic(annotation) if parameter.annotation == bool: argument_kwargs["type"] = str2bool argument_kwargs["choices"] = [0, 1] elif is_list: argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls elif is_dict: raise RuntimeError( f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}" ) else: argument_kwargs["type"] = annotation if parameter.default != inspect.Parameter.empty: if parameter.annotation == bool: argument_kwargs["default"] = str2bool(parameter.default) else: argument_kwargs["default"] = parameter.default else: argument_kwargs["required"] = True name = parameter.name.replace("_", "-") LOGGER.debug(f"Adding argument {name} with {argument_kwargs}") parser.add_argument(f"--{name}", **argument_kwargs) return parser class ArgParserGenerator: def __init__(self, cls_or_fn, module_path: Optional[str] = None): self._cls_or_fn = cls_or_fn init_method_name = "__init__" self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, init_method_name, None) input_is_python_file = module_path and module_path.endswith(".py") self._input_path = module_path if input_is_python_file else None self._required_fn_name_for_signature_parsing = getattr( cls_or_fn, "required_fn_name_for_signature_parsing", None ) def update_argparser(self, parser): name = self._handle.__name__ group_parser = parser.add_argument_group(name) add_args_for_fn_signature(group_parser, fn=self._handle) self._update_argparser(group_parser) def get_args(self, args: argparse.Namespace): 
filtered_args = filter_fn_args(args, fn=self._handle) tmp_parser = argparse.ArgumentParser(allow_abbrev=False) self._update_argparser(tmp_parser) custom_names = [ p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction) ] custom_params = {n: getattr(args, n) for n in custom_names} filtered_args = {**filtered_args, **custom_params} return filtered_args def from_args(self, args: Union[argparse.Namespace, Dict]): args = self.get_args(args) LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})") return self._cls_or_fn(**args) def _update_argparser(self, parser): label = "argparser_update" if self._input_path: update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME) if update_argparser_handle: update_argparser_handle(parser) elif self._required_fn_name_for_signature_parsing: fn_handle = load_from_file( self._input_path, label=label, target=self._required_fn_name_for_signature_parsing ) if fn_handle: add_args_for_fn_signature(parser, fn_handle)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/args.py
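A sketch showing how ArgParserGenerator above turns a function signature into CLI flags. get_dataloader_fn here is a stand-in defined only for this example, not the toolkit's real dataloader factory, and the example assumes the model-navigator package used by args.py is installed.

import argparse

from triton.deployment_toolkit.args import ArgParserGenerator


def get_dataloader_fn(data_dir: str, batch_size: int = 8, shuffle: bool = False):
    """Placeholder factory; a real one would return a generator of (ids, x, y) batches."""
    return data_dir, batch_size, shuffle


parser = argparse.ArgumentParser()
generator = ArgParserGenerator(get_dataloader_fn)
generator.update_argparser(parser)  # adds --data-dir (required), --batch-size, --shuffle

args = parser.parse_args(["--data-dir", "/data/squad", "--batch-size", "16"])
result = generator.from_args(args)  # calls get_dataloader_fn(data_dir=..., batch_size=16, shuffle=False)
print(result)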
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import re
from typing import Dict, List

from natsort import natsorted
from tabulate import tabulate


def sort_results(results: List):
    results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
    return results


def save_results(filename: str, data: List, formatted: bool = False):
    data = format_data(data=data) if formatted else data
    with open(filename, "a") as csvfile:
        fieldnames = data[0].keys()
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()
        for row in data:
            writer.writerow(row)


def format_data(data: List[Dict]) -> List[Dict]:
    formatted_data = list()
    for item in data:
        formatted_item = format_keys(data=item)
        formatted_data.append(formatted_item)
    return formatted_data


def format_keys(data: Dict) -> Dict:
    keys = {format_key(key=key): value for key, value in data.items()}
    return keys


def format_key(key: str) -> str:
    key = " ".join([k.capitalize() for k in re.split("_| ", key)])
    return key


def show_results(results: List[Dict]):
    headers = list(results[0].keys())
    summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
    print(tabulate(summary, headers=headers))
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/report.py
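A short demonstration of the reporting helpers above with made-up measurements.

from triton.deployment_toolkit.report import save_results, show_results

results = [
    {"batch_size": 1, "throughput_infer_per_sec": 312.0, "latency_ms": 3.2},
    {"batch_size": 8, "throughput_infer_per_sec": 1890.0, "latency_ms": 4.2},
]

show_results(results)                                 # tabulated summary on stdout
save_results("results.csv", results, formatted=True)  # keys rendered as "Batch Size", "Latency Ms", ...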
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pathlib import Path from typing import Dict, Optional, Union import numpy as np # pytype: disable=import-error import onnx import onnx.shape_inference import onnxruntime from google.protobuf import text_format from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec from ..extensions import loaders, runners, savers from .utils import infer_precision # pytype: enable=import-error LOGGER = logging.getLogger(__name__) def _value_info2tensor_spec(value_info: onnx.ValueInfoProto): onnx_data_type_map = {"float": "float32", "double": "float64"} elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower() dtype = onnx_data_type_map.get(elem_type_name, elem_type_name) def _get_dim(dim): which = dim.WhichOneof("value") if which is not None: # which is None when dim is None dim = getattr(dim, which) return None if isinstance(dim, (str, bytes)) else dim shape = value_info.type.tensor_type.shape shape = tuple(_get_dim(d) for d in shape.dim) return TensorSpec(value_info.name, dtype=dtype, shape=shape) def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]: import networkx as nx # build directed graph nx_graph = nx.DiGraph() def _get_dtype(vi): t = vi.type if hasattr(t, "tensor_type"): type_id = t.tensor_type.elem_type else: raise NotImplementedError("Not implemented yet") return TENSOR_TYPE_TO_NP_TYPE[type_id] node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info} node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output} node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input} for node in onnx_graph.node: node_dtype = node_output2type.get("+".join(node.output), None) nx_graph.add_node( node.name, op=node.op_type, attr={a.name: a for a in node.attribute}, dtype=node_dtype, ) for input_name in node.input: prev_node = node_outputs2node.get(input_name, None) if prev_node: nx_graph.add_edge(prev_node.name, node.name) for input_node in onnx_graph.input: input_name = input_node.name nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node)) next_node = node_inputs2node.get(input_name, None) if next_node: nx_graph.add_edge(input_name, next_node.name) for output in onnx_graph.output: output_name = output.name nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output)) prev_node = node_outputs2node.get(output_name, None) if prev_node: nx_graph.add_edge(prev_node.name, output_name) else: LOGGER.warning(f"Could not find previous node for {output_name}") input_names = [n.name for n in onnx_graph.input] output_names = [n.name for n in onnx_graph.output] most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None)) if most_common_dtype is not None: precision = 
{np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype] else: precision = None return precision class OnnxLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() model = onnx.load(model_path) onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) # TODO: probably modification of onnx model ios causes error on optimize # from onnx.utils import polish_model # model = polish_model(model) # run checker, docs strip, optimizer and shape inference inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input} outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output} precision = _infer_graph_precision(model.graph) return Model(model, precision, inputs, outputs) class OnnxSaver(BaseSaver): def __init__(self, as_text: bool = False): self._as_text = as_text def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: model_path = Path(model_path) LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}") model_path.parent.mkdir(parents=True, exist_ok=True) onnx_model: onnx.ModelProto = model.handle if self._as_text: with model_path.open("w") as f: f.write(text_format.MessageToString(onnx_model)) else: with model_path.open("wb") as f: f.write(onnx_model.SerializeToString()) """ ExecutionProviders on onnxruntime 1.4.0 ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'MIGraphXExecutionProvider', 'NGRAPHExecutionProvider', 'OpenVINOExecutionProvider', 'DnnlExecutionProvider', 'NupharExecutionProvider', 'VitisAIExecutionProvider', 'ArmNNExecutionProvider', 'ACLExecutionProvider', 'CPUExecutionProvider'] """ def _check_providers(providers): providers = providers or [] if not isinstance(providers, (list, tuple)): providers = [providers] available_providers = onnxruntime.get_available_providers() unavailable = set(providers) - set(available_providers) if unavailable: raise RuntimeError(f"Unavailable providers {unavailable}") return providers class OnnxRunner(BaseRunner): def __init__(self, verbose_runtime_logs: bool = False): self._providers = None self._verbose_runtime_logs = verbose_runtime_logs def init_inference(self, model: Model): assert isinstance(model.handle, onnx.ModelProto) return OnnxRunnerSession( model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs ) class OnnxRunnerSession(BaseRunnerSession): def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False): super().__init__(model) self._input_names = None self._output_names = None self._session = None self._providers = providers self._verbose_runtime_logs = verbose_runtime_logs self._old_env_values = {} def __enter__(self): self._old_env_values = self._set_env_variables() sess_options = onnxruntime.SessionOptions() # default session options if self._verbose_runtime_logs: sess_options.log_severity_level = 0 sess_options.log_verbosity_level = 1 LOGGER.info( f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}" ) self._input_names = list(self._model.inputs) self._output_names = list(self._model.outputs) model_payload = self._model.handle.SerializeToString() self._session = onnxruntime.InferenceSession( model_payload, providers=self._providers, sess_options=sess_options ) return self def __exit__(self, exc_type, exc_value, traceback): self._input_names = None self._output_names = None self._session = 
None self._recover_env_variables(self._old_env_values) def __call__(self, x: Dict[str, object]): feed_dict = {k: x[k] for k in self._input_names} y_pred = self._session.run(self._output_names, feed_dict) y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.ONNX.value, OnnxLoader) runners.register_extension(Format.ONNX.value, OnnxRunner) savers.register_extension(Format.ONNX.value, OnnxSaver)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/bermuda/onnx.py
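A hedged end-to-end sketch for the ONNX loader and runner above. model.onnx is a placeholder file, and the sketch assumes the exported graph has static input shapes; dynamic dimensions (None) would need concrete values before allocating the dummy feed.

import numpy as np

from triton.deployment_toolkit.bermuda.onnx import OnnxLoader, OnnxRunner

model = OnnxLoader().load("model.onnx")          # placeholder path; parses inputs/outputs into TensorSpec
print(model.inputs, model.outputs, model.precision)

runner = OnnxRunner()
with runner.init_inference(model) as session:    # OnnxRunnerSession is a context manager
    # assumes static shapes; replace spec.shape with concrete sizes for dynamic graphs
    feed = {name: np.zeros(spec.shape, dtype=spec.dtype) for name, spec in model.inputs.items()}
    y_pred = session(feed)
    print({name: out.shape for name, out in y_pred.items()})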
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/bermuda/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Counter from typing import Callable, Dict, List, Optional import networkx as nx from ..core import ShapeSpec def infer_precision( nx_graph: nx.Graph, input_names: List[str], output_names: List[str], get_node_dtype_fn: Callable, ): node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes] node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]] dtypes_counter = Counter(node_dtypes) return dtypes_counter.most_common()[0][0] def get_shapes_with_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None): def _set_dynamic_shapes(t, shapes): for k, v in t.items(): shape = list(v.shape) for dim, s in enumerate(shape): if shapes[k][dim] != -1 and shapes[k][dim] != s: shapes[k][dim] = -1 def _mark_batch_axis(shape, batch_axis: int): shape = list(shape) shape[batch_axis] = -1 return tuple(shape) ## get all shapes from input and output tensors input_shapes = {} output_shapes = {} for batch in dataloader: _, x, y = batch for k, v in x.items(): input_shapes[k] = list(v.shape) for k, v in y.items(): output_shapes[k] = list(v.shape) break # based on max <max_num_iters> iterations, check which # dimensions differ to determine dynamic_axes max_num_iters = 100 for idx, batch in enumerate(dataloader): if idx >= max_num_iters: break _, x, y = batch _set_dynamic_shapes(x, input_shapes) _set_dynamic_shapes(y, output_shapes) if batch_size_dim is not None: input_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in input_shapes.items()} output_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in output_shapes.items()} return input_shapes, output_shapes def get_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None): input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim) all_shapes = {**input_shapes, **output_shapes} dynamic_axes = {} for k, shape in all_shapes.items(): for idx, s in enumerate(shape): if s == -1: dynamic_axes[k] = {idx: k + "_" + str(idx)} for k in all_shapes: if k in dynamic_axes: dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)}) elif batch_size_dim is not None: dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)} return dynamic_axes def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]: def init_counters_and_shapes(x, counters, min_shapes, max_shapes): for k, v in x.items(): counters[k] = Counter() min_shapes[k] = [float("inf")] * v.ndim max_shapes[k] = [float("-inf")] * v.ndim counters = {} min_shapes: Dict[str, tuple] = {} max_shapes: Dict[str, tuple] = {} for idx, batch in enumerate(dataloader): ids, x, y = batch if idx == 0: init_counters_and_shapes(x, counters, min_shapes, max_shapes) for k, v in x.items(): shape = v.shape counters[k][shape] += 1 min_shapes[k] = tuple(min(a, b) for a, b in zip(min_shapes[k], shape)) max_shapes[k] = tuple(max(a, b) 
for a, b in zip(max_shapes[k], shape)) opt_shapes: Dict[str, tuple] = {} for k, v in counters.items(): opt_shapes[k] = v.most_common(1)[0][0] shapes = {} for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes shapes[k] = ShapeSpec( min=(1,) + min_shapes[k][1:], max=(max_batch_size,) + max_shapes[k][1:], opt=(max_batch_size,) + opt_shapes[k][1:], ) return shapes
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/bermuda/utils.py
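A toy dataloader illustrating the shape-inference helpers above; the dataloaders in this toolkit yield (ids, x, y) batches of numpy arrays in the same way. Tensor names and shapes are placeholders.

import numpy as np

from triton.deployment_toolkit.bermuda.utils import get_input_shapes, get_shapes_with_dynamic_axes


def toy_dataloader():
    # three batches of size 4 with a varying sequence length
    for seq_len in (128, 256, 384):
        ids = list(range(4))
        x = {"input__0": np.zeros((4, seq_len), dtype=np.int64)}
        y = {"output__0": np.zeros((4, 2), dtype=np.float32)}
        yield ids, x, y


print(get_shapes_with_dynamic_axes(toy_dataloader(), batch_size_dim=0))
# ({'input__0': (-1, -1)}, {'output__0': (-1, 2)})  - varying dims and the batch axis become -1

print(get_input_shapes(toy_dataloader(), max_batch_size=8))
# {'input__0': ShapeSpec(min=(1, 128), opt=(8, 128), max=(8, 384))}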
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys from pathlib import Path from typing import Dict, NamedTuple, Optional, Union import numpy as np # pytype: disable=import-error try: import pycuda.autoinit import pycuda.driver as cuda except Exception as e: logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}") # pytype: enable=import-error import tensorrt as trt # pytype: disable=import-error from ..core import BaseLoader, BaseRunner, BaseRunnerSession, Format, Model, TensorSpec from ..extensions import loaders, runners LOGGER = logging.getLogger(__name__) TRT_LOGGER = trt.Logger(trt.Logger.INFO) # documentation: # https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html # https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section _NP_DTYPE2TRT_DTYPE = { np.dtype("float32"): trt.DataType.FLOAT, np.dtype("float16"): trt.DataType.HALF, np.dtype("int8"): trt.DataType.INT8, np.dtype("int32"): trt.DataType.INT32, np.dtype("bool"): trt.DataType.BOOL, } class TensorRTLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: model_path = Path(model_path) LOGGER.debug(f"Loading TensorRT engine from {model_path}") engine = self._load_engine(model_path) if engine is None: LOGGER.debug("Unable to load engine without plugins. 
Loading plugins.") trt.init_libnvinfer_plugins(logger=TRT_LOGGER, namespace="") LOGGER.debug(f"Loading TensorRT engine with plugins from {model_path}") engine = self._load_engine(model_path) if engine is None: raise RuntimeError(f"Could not load ICudaEngine from {model_path}") inputs = {} outputs = {} for binding_idx in range(engine.num_bindings): name = engine.get_binding_name(binding_idx) is_input = engine.binding_is_input(binding_idx) dtype = np.dtype(trt.nptype(engine.get_binding_dtype(binding_idx))).name shape = engine.get_binding_shape(binding_idx) if is_input: inputs[name] = TensorSpec(name, dtype, shape) else: outputs[name] = TensorSpec(name, dtype, shape) return Model(engine, None, inputs, outputs) def _load_engine(self, model_path: Path): with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime: engine = runtime.deserialize_cuda_engine(fh.read()) return engine class TRTBuffers(NamedTuple): x_host: Optional[Dict[str, object]] x_dev: Dict[str, object] y_pred_host: Dict[str, object] y_pred_dev: Dict[str, object] class TensorRTRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): return TensorRTRunnerSession(model=model) class TensorRTRunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, trt.ICudaEngine) self._model = model self._has_dynamic_shapes = None self._context = None self._engine: trt.ICudaEngine = self._model.handle self._cuda_context = pycuda.autoinit.context self._input_names = None self._output_names = None self._buffers = None def __enter__(self): self._context = self._engine.create_execution_context() self._context.__enter__() self._input_names = [ self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx) ] self._output_names = [ self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx) ] # all_binding_shapes_specified is True for models without dynamic shapes # so initially this variable is False for models with dynamic shapes self._has_dynamic_shapes = not self._context.all_binding_shapes_specified return self def __exit__(self, exc_type, exc_value, traceback): self._context.__exit__(exc_type, exc_value, traceback) self._input_names = None self._output_names = None # TODO: are cuda buffers dealloc automatically? 
self._buffers = None def __call__(self, x): buffers = self._prepare_buffers_if_needed(x) bindings = self._update_bindings(buffers) for name in self._input_names: cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name]) self._cuda_context.push() self._context.execute_v2(bindings=bindings) self._cuda_context.pop() for name in self._output_names: cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name]) return buffers.y_pred_host def _update_bindings(self, buffers: TRTBuffers): bindings = [None] * self._engine.num_bindings for name in buffers.y_pred_dev: binding_idx: int = self._engine[name] bindings[binding_idx] = buffers.y_pred_dev[name] for name in buffers.x_dev: binding_idx: int = self._engine[name] bindings[binding_idx] = buffers.x_dev[name] return bindings def _set_dynamic_input_shapes(self, x_host): def _is_shape_dynamic(input_shape): return any([dim is None or dim == -1 for dim in input_shape]) for name in self._input_names: bindings_idx = self._engine[name] data_shape = x_host[name].shape # pytype: disable=attribute-error if self._engine.is_shape_binding(bindings_idx): input_shape = self._context.get_shape(bindings_idx) if _is_shape_dynamic(input_shape): self._context.set_shape_input(bindings_idx, data_shape) else: input_shape = self._engine.get_binding_shape(bindings_idx) if _is_shape_dynamic(input_shape): self._context.set_binding_shape(bindings_idx, data_shape) assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified def _prepare_buffers_if_needed(self, x_host: Dict[str, object]): # pytype: disable=attribute-error new_batch_size = list(x_host.values())[0].shape[0] current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0 # pytype: enable=attribute-error if self._has_dynamic_shapes or new_batch_size != current_batch_size: # TODO: are CUDA buffers dealloc automatically? self._set_dynamic_input_shapes(x_host) y_pred_host = {} for name in self._output_names: shape = self._context.get_binding_shape(self._engine[name]) binding_idx: int = self._engine[name] dtype_from_trt_binding = np.dtype(trt.nptype(self._engine.get_binding_dtype(binding_idx))) dtype_from_model_spec = np.dtype(self._model.outputs[name].dtype) assert dtype_from_model_spec == dtype_from_trt_binding y_pred_host[name] = np.zeros(shape, dtype=dtype_from_model_spec) y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()} # cast host input into binding dtype def _cast_input(name, data): binding_idx: int = self._engine[name] np_dtype = trt.nptype(self._engine.get_binding_dtype(binding_idx)) return data.astype(np_dtype) x_host = {name: _cast_input(name, host_input) for name, host_input in x_host.items()} x_dev = { name: cuda.mem_alloc(host_input.nbytes) for name, host_input in x_host.items() if name in self._input_names # pytype: disable=attribute-error } self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev) return self._buffers._replace(x_host=x_host) if "pycuda.driver" in sys.modules: loaders.register_extension(Format.TRT.value, TensorRTLoader) runners.register_extension(Format.TRT.value, TensorRTRunner) else: LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/bermuda/tensorrt.py
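A hedged sketch of running a serialized TensorRT engine through the loader and runner above. It requires TensorRT and pycuda on a GPU machine; model.plan is a placeholder engine file, and engines with dynamic shapes would need concrete sizes instead of the spec-derived zeros.

import numpy as np

from triton.deployment_toolkit.bermuda.tensorrt import TensorRTLoader, TensorRTRunner

model = TensorRTLoader().load("model.plan")      # placeholder engine path
print(model.inputs, model.outputs)

runner = TensorRTRunner()
with runner.init_inference(model) as session:
    # binding shapes/dtypes come from the engine; -1 dims must be replaced with real sizes
    feed = {name: np.zeros(spec.shape, dtype=spec.dtype) for name, spec in model.inputs.items()}
    y_pred = session(feed)
    print({name: out.shape for name, out in y_pred.items()})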
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import typing from collections import Counter from pathlib import Path from typing import Dict, Optional, Union import numpy as np import torch # pytype: disable=import-error import yaml from model_navigator.model import ModelSignatureConfig from model_navigator.tensor import TensorSpec from model_navigator.utils.config import YamlConfigFile from ..core import ( GET_MODEL_FN_NAME, BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, load_from_file, ) from ..extensions import loaders, runners, savers from .utils import get_dynamic_axes, get_shapes_with_dynamic_axes LOGGER = logging.getLogger(__name__) def get_sample_input(dataloader, device): for batch in dataloader: _, x, _ = batch break if isinstance(x, dict): sample_input = list(x.values()) elif isinstance(x, list): sample_input = x else: raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict") for idx, s in enumerate(sample_input): sample_input[idx] = torch.from_numpy(s).to(device) return tuple(sample_input) def get_model_device(torch_model): if next(torch_model.parameters()).is_cuda: return "cuda" else: return "cpu" def infer_model_precision(model): counter = Counter() for param in model.parameters(): counter[param.dtype] += 1 if counter[torch.float16] > 0: return Precision.FP16 else: return Precision.FP32 def _get_tensor_dtypes(dataloader, precision): def _get_dtypes(t): def _get_dtype(v): dtype = str(v.dtype) if dtype == "float64": dtype = "float32" if precision == Precision.FP16 and dtype == "float32": dtype = "float16" return np.dtype(dtype) return {k: _get_dtype(v) for k, v in t.items()} batch = next(dataloader) _, x, y = batch input_dtypes = _get_dtypes(x) output_dtypes = _get_dtypes(y) return input_dtypes, output_dtypes ### TODO assumption: floating point input ### type has same precision as the model def _get_model_signature( inputs_names: typing.List[str], outputs_names: typing.List[str], precision, dataloader_fn, batch_size_dim: typing.Optional[int] = None, ): dataloader = dataloader_fn() input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision) input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim) inputs = { name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in inputs_names } outputs = { name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name])) for name in outputs_names } return ModelSignatureConfig(inputs, outputs) class PyTorchModelLoader(BaseLoader): required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME def __init__(self, **kwargs): self._model_args = kwargs def load(self, model_path: Union[str, Path], **kwargs) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME) model, io_names_dict = 
get_model(**self._model_args) dataloader_fn = kwargs.get("dataloader_fn", None) output_type = kwargs.get("output_type", None) precision = infer_model_precision(model) batch_axis = getattr(model, "bermuda_batch_axis", 0) # by default models supports batching; batch_axis=0 model_signature = _get_model_signature( inputs_names=io_names_dict["inputs"], outputs_names=io_names_dict["outputs"], precision=precision, dataloader_fn=dataloader_fn, batch_size_dim=batch_axis, ) model = Model(handle=model, precision=precision, inputs=model_signature.inputs, outputs=model_signature.outputs) if output_type == Format.TS_TRACE.value: return self._trace(model, dataloader_fn) elif output_type == Format.TS_SCRIPT.value: return self._script(model) elif output_type == Format.ONNX.value: return model else: raise ValueError(f"Not supported PyTorch format: {output_type}") def _trace(self, model: Model, dataloader_fn) -> Model: device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) traced_model = torch.jit.trace_module(model.handle, {"forward": dummy_input}) return Model(traced_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs) def _script(self, model: Model) -> Model: scripted_model = torch.jit.script(model.handle) return Model(scripted_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs) class TorchScriptLoader(BaseLoader): def __init__(self, tensor_names_path: str = None, **kwargs): self._model_args = kwargs self._io_spec = None if tensor_names_path is not None: with Path(tensor_names_path).open("r") as fh: tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader) self._io_spec = ModelSignatureConfig(tensor_infos["inputs"], tensor_infos["outputs"]) def load(self, model_path: Union[str, Path], **_) -> Model: if not isinstance(model_path, Path): model_path = Path(model_path) model = torch.jit.load(model_path.as_posix()) precision = infer_model_precision(model) io_spec = self._io_spec if not io_spec: yaml_path = model_path.parent / f"{model_path.name}.yaml" if not yaml_path.is_file(): raise ValueError( f"If `--tensor-names-path is not provided, " f"TorchScript model loader expects file {yaml_path} with tensor information." ) with yaml_path.open("r") as fh: tensor_info = yaml.load(fh, Loader=yaml.SafeLoader) io_spec = ModelSignatureConfig(tensor_info["inputs"], tensor_info["outputs"]) return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class PYT2ONNXSaver(BaseSaver): def __init__(self, onnx_opset: int = None): self._onnx_opset = onnx_opset def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted." 
batch_axis = getattr(model.handle, "bermuda_batch_axis", 0) # by default models supports batching; batch_axis=0 dynamic_axes = get_dynamic_axes(dataloader_fn(), batch_size_dim=batch_axis) device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) with torch.no_grad(): torch.onnx.export( model.handle, dummy_input, model_path, do_constant_folding=True, input_names=list(model.inputs), output_names=list(model.outputs), dynamic_axes=dynamic_axes, opset_version=self._onnx_opset, enable_onnx_checker=True, ) class TorchScriptSaver(BaseSaver): def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: if not isinstance(model_path, Path): model_path = Path(model_path) if isinstance(model.handle, torch.jit.ScriptModule): torch.jit.save(model.handle, model_path.as_posix()) else: raise RuntimeError("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.") signature_config = ModelSignatureConfig(inputs=model.inputs, outputs=model.outputs) annotation_path = model_path.parent / f"{model_path.name}.yaml" with YamlConfigFile(annotation_path) as config_file: config_file.save_config(signature_config) class PyTorchRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): return PyTorchRunnerSession(model=model) class PyTorchRunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted." self._model = model self._output_names = None def __enter__(self): self._output_names = list(self._model.outputs) return self def __exit__(self, exc_type, exc_value, traceback): self._output_names = None self._model = None def __call__(self, x: Dict[str, object]): with torch.no_grad(): feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()] y_pred = self._model.handle(*feed_list) if isinstance(y_pred, torch.Tensor): y_pred = (y_pred,) y_pred = [t.cpu().numpy() for t in y_pred] y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.PYT.value, PyTorchModelLoader) loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader) loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader) savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver) savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver) savers.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXSaver) runners.register_extension(Format.PYT.value, PyTorchRunner) runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner) runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/bermuda/pyt.py
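A minimal usage sketch for the runner registered in pyt.py above. It assumes a `Model` object already produced by `PyTorchModelLoader` or `TorchScriptLoader`, a CUDA-capable host, and a hypothetical input tensor named `input__0`; none of these specifics come from the file itself.

import numpy as np

runner = PyTorchRunner()
with runner.init_inference(model=model) as session:  # `model` is an assumed, already-loaded Model
    # Inputs are numpy arrays keyed by tensor name; outputs come back keyed by the
    # output names recorded in the model signature.
    outputs = session({"input__0": np.zeros((1, 384), dtype=np.int64)})  # hypothetical name/shape
    print(list(outputs.keys()))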
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode, ModelAnalyzerReportMode # noqa: F401 from .model_analyzer_config import ModelAnalyzerConfig # noqa: F401
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/model_analyzer/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .exceptions import ModelAnalyzerException class ModelAnalyzerConfig: """ A config class to set arguments to the Model Analyzer. An argument set to None will use the default. """ model_analyzer_args = [ "config-file", ] input_to_options = [ "config-file", ] def __init__(self): # Args will be a dict with the string representation as key self._args = {k: None for k in self.model_analyzer_args} self._options = { "-f": "config.yaml", } self._input_to_options = { "config-file": "-f", } def to_cli_string(self): """ Utility function to convert a config into a string of arguments to the server with CLI. Returns ------- str the command consisting of all set arguments to the model analyzer. e.g. '--model-repository=/models --verbose=True' """ # single dashed options, then verbose flags, then main args args = [f"{k} {v}" for k, v in self._options.items() if v] args += [f"--{k}={v}" for k, v in self._args.items() if v] return " ".join(args) @classmethod def allowed_keys(cls): """ Returns ------- list of str The keys that are allowed to be passed into model_analyzer """ return list(cls.model_analyzer_args) + list(cls.input_to_options) def __getitem__(self, key): """ Gets an arguments value in config Parameters ---------- key : str The name of the argument to the model analyzer Returns ------- The value that the argument is set to in this config """ if key in self._args: return self._args[key] elif key in self._input_to_options: return self._options[self._input_to_options[key]] else: raise ModelAnalyzerException(f"'{key}' Key not found in config") def __setitem__(self, key, value): """ Sets an arguments value in config after checking if defined/supported. Parameters ---------- key : str The name of the argument to the model analyzer value : (any) The value to which the argument is being set Raises ------ TritonModelAnalyzerException If key is unsupported or undefined in the config class """ if key in self._args: self._args[key] = value elif key in self._input_to_options: self._options[self._input_to_options[key]] = value else: raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/model_analyzer/model_analyzer_config.py
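A short sketch of how ModelAnalyzerConfig validates keys and renders CLI arguments; the file name passed in is a placeholder.

config = ModelAnalyzerConfig()
print(config.to_cli_string())            # "-f config.yaml" - only the default option is set
config["config-file"] = "analyzer.yaml"  # accepted: "config-file" is an allowed key

try:
    config["verbose"] = True             # not an allowed key for this wrapper
except ModelAnalyzerException as e:
    print(e)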
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class ModelAnalyzerException(Exception): def __init__(self, message: str): self._message = message def __str__(self): """ Get the exception string representation. Returns ------- str The message associated with this exception, or None if no message. """ return self._message @property def message(self): """ Get the exception message. Returns ------- str The message associated with this exception, or None if no message. """ return self._message
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/model_analyzer/exceptions.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import subprocess from subprocess import CalledProcessError from .exceptions import ModelAnalyzerException SERVER_OUTPUT_TIMEOUT_SECS = 5 LOGGER = logging.getLogger(__name__) class ModelAnalyzerMode: PROFILE = "profile" ANALYZE = "analyze" REPORT = "report" class ModelAnalyzerReportMode: OFFLINE = "offline" ONLINE = "online" class ModelAnalyzer: """ Concrete Implementation of Model Analyzer interface that runs analyzer locally as as subprocess. """ _analyzer_path = "model-analyzer" def __init__(self, config): """ Parameters ---------- config : AnalyzerConfig the config object containing arguments for this server instance """ self._analyzer_process = None self._analyzer_config = config self._log = None def run(self, mode: str, verbose: bool = False, quiet: bool = False, report_mode: str = None): """ Starts the model analyzer locally """ if self._analyzer_path: cmd = [self._analyzer_path] if verbose: cmd += ["--verbose"] if quiet: cmd += ["--quiet"] if report_mode: cmd += ["-m"] cmd += [report_mode] cmd += [mode] cmd += self._analyzer_config.to_cli_string().split() LOGGER.debug(f"Model Analyze command: {cmd}") try: subprocess.run(cmd, check=True, start_new_session=True) except CalledProcessError as e: raise ModelAnalyzerException( f"Running {self._analyzer_path} with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}" )
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/model_analyzer/model_analyzer.py
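A sketch of driving the analyzer wrapper, assuming the `model-analyzer` CLI is installed and a `config.yaml` file exists in the working directory.

config = ModelAnalyzerConfig()           # keeps the default "-f config.yaml" option
analyzer = ModelAnalyzer(config=config)

analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=True)
# Executes roughly: model-analyzer --verbose profile -f config.yaml

analyzer.run(mode=ModelAnalyzerMode.REPORT, report_mode=ModelAnalyzerReportMode.OFFLINE)
# Executes roughly: model-analyzer -m offline report -f config.yaml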
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .perf_analyzer import PerfAnalyzer # noqa: F401 from .perf_config import PerfAnalyzerConfig # noqa: F401
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/perf_analyzer/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict from .exceptions import PerfAnalyzerException class PerfAnalyzerConfig: """ A config class to set arguments to the perf_analyzer. An argument set to None will use the perf_analyzer's default. """ perf_analyzer_args = [ "async", "sync", "measurement-interval", "measurement-mode", "measurement-request-count", "concurrency-range", "request-rate-range", "request-distribution", "request-intervals", "binary-search", "num-of-sequence", "latency-threshold", "max-threads", "stability-percentage", "max-trials", "percentile", "input-data", "shared-memory", "output-shared-memory-size", "sequence-length", "string-length", "string-data", ] perf_analyzer_multiple_args = [ "shape", ] input_to_options = [ "model-name", "model-version", "batch-size", "url", "protocol", "latency-report-file", "streaming", ] input_to_verbose = ["verbose", "extra-verbose"] def __init__(self): """ Construct a PerfAnalyzerConfig """ self._args = {k: None for k in self.perf_analyzer_args} self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args} self._options = { "-m": None, "-x": None, "-b": None, "-u": None, "-i": None, "-f": None, "-H": None, "-c": None, "-t": None, } self._verbose = {"-v": None, "-v -v": None} self._input_to_options = { "model-name": "-m", "model-version": "-x", "batch-size": "-b", "url": "-u", "protocol": "-i", "latency-report-file": "-f", "streaming": "-H", "concurrency": "-c", "threads": "-t", } self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"} @classmethod def allowed_keys(cls): """ Returns ------- list of str The keys that are allowed to be passed into perf_analyzer """ return ( list(cls.perf_analyzer_args) + list(cls.perf_analyzer_multiple_args) + list(cls.input_to_options) + list(cls.input_to_verbose) ) def update_config(self, params=None): """ Allows setting values from a params dict Parameters ---------- params: dict keys are allowed args to perf_analyzer """ if params: for key in params: self[key] = params[key] def to_cli_string(self): """ Utility function to convert a config into a string of arguments to the perf_analyzer with CLI. Returns ------- str cli command string consisting of all arguments to the perf_analyzer set in the config, without the executable name. 
""" # single dashed options, then verbose flags, then main args args = [f"{k} {v}" for k, v in self._options.items() if v] args += [k for k, v in self._verbose.items() if v] args += [f"--{k}={v}" for k, v in self._args.items() if v] for k, v in self._multiple_args.items(): for item in v: args.append(f"--{k}={item}") return " ".join(args) def __getitem__(self, key: str): """ Gets an arguments value in config Parameters ---------- key : str The name of the argument to the perf_analyzer Returns ------- The value that the argument is set to in this config Raises ------ TritonModelAnalyzerException If argument not found in the config """ if key in self._args: return self._args[key] elif key in self._multiple_args: return self._multiple_args[key] elif key in self._input_to_options: return self._options[self._input_to_options[key]] elif key in self._input_to_verbose: return self._verbose[self._input_to_verbose[key]] else: raise PerfAnalyzerException(f"'{key}' Key not found in config") def __setitem__(self, key: str, value: Any): """ Sets an arguments value in config after checking if defined/supported. Parameters ---------- key : str The name of the argument to the perf_analyzer value : (any) The value to which the argument is being set Raises ------ TritonModelAnalyzerException If key is unsupported or undefined in the config class """ if key in self._args: self._args[key] = value elif key in self._multiple_args: self._multiple_args[key].append(value) elif key in self._input_to_options: self._options[self._input_to_options[key]] = value elif key in self._input_to_verbose: self._verbose[self._input_to_verbose[key]] = value else: raise PerfAnalyzerException( f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer." )
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/perf_analyzer/perf_config.py
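A sketch of building a perf_analyzer argument string with this config class; the model name and tensor shape are placeholders.

config = PerfAnalyzerConfig()
config.update_config({
    "model-name": "bert",                 # hypothetical model deployed on Triton
    "batch-size": 8,
    "concurrency": 1,
    "measurement-mode": "count_windows",
    "shape": "input__0:384",              # repeatable option, collected into a list
})
print(config.to_cli_string())
# roughly: -m bert -b 8 -c 1 --measurement-mode=count_windows --shape=input__0:384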
class PerfAnalyzerException(Exception): def __init__(self, message: str): self._message = message def __str__(self): """ Get the exception string representation. Returns ------- str The message associated with this exception, or None if no message. """ return self._message @property def message(self): """ Get the exception message. Returns ------- str The message associated with this exception, or None if no message. """ return self._message
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/perf_analyzer/exceptions.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen

# method from PEP-366 to support relative import in executed modules
if __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .exceptions import PerfAnalyzerException

MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000

LOGGER = logging.getLogger(__name__)


class PerfAnalyzer:
    """
    This class provides an interface for running workloads with perf_analyzer.
    """

    def __init__(self, config):
        """
        Parameters
        ----------
        config : PerfAnalyzerConfig
            keys are names of arguments to perf_analyzer, values are their values.
        """
        self.bin_path = "perf_analyzer"
        self._config = config
        self._output = str()

    def run(self):
        """
        Runs perf_analyzer with the initialized configuration. The captured stdout of the
        run is available through the output() method.

        Raises
        ------
        PerfAnalyzerException
            If the subprocess fails, or if no stable measurement is obtained after
            MAX_INTERVAL_CHANGES attempts.
        """
        for _ in range(MAX_INTERVAL_CHANGES):
            command = [self.bin_path]
            command += self._config.to_cli_string().replace("=", " ").split()
            LOGGER.debug(f"Perf Analyzer command: {command}")
            try:
                process = Popen(command, start_new_session=True, stdout=PIPE, encoding="utf-8")
                streamed_output = ""
                while True:
                    output = process.stdout.readline()
                    if output == "" and process.poll() is not None:
                        break
                    if output:
                        streamed_output += output
                        print(output.rstrip())

                self._output += streamed_output
                result = process.poll()
                if result != 0:
                    raise CalledProcessError(returncode=result, cmd=command, output=streamed_output)

                return
            except CalledProcessError as e:
                if self._failed_with_measurement_interval(e.output):
                    if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
                        self._increase_request_count()
                    else:
                        self._increase_time_interval()
                else:
                    raise PerfAnalyzerException(
                        f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
                    )

        raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")

    def output(self):
        """
        Returns
        -------
        The stdout output of the last perf_analyzer run
        """
        if self._output:
            return self._output
        raise PerfAnalyzerException("Attempted to get perf_analyzer output without calling run first.")

    def _failed_with_measurement_interval(self, output: str):
        # True if perf_analyzer reported that it could not obtain a stable measurement
        # with the current measurement window / request count.
        return (
            output.find("Failed to obtain stable measurement") != -1
            or output.find("Please use a larger time window") != -1
        )

    def _increase_request_count(self):
        self._config["measurement-request-count"] += COUNT_INTERVAL_DELTA
        LOGGER.debug(
            "perf_analyzer's measurement request count is too small, "
            f"increased to {self._config['measurement-request-count']}."
        )

    def _increase_time_interval(self):
        self._config["measurement-interval"] += TIME_INTERVAL_DELTA
        LOGGER.debug(
            "perf_analyzer's measurement window is too small, "
            f"increased to {self._config['measurement-interval']} ms."
        )
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/perf_analyzer/perf_analyzer.py
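A sketch of running the wrapper end to end, assuming the `perf_analyzer` binary from the Triton client SDK is on PATH and the target model is already being served; the model name and report file are placeholders.

config = PerfAnalyzerConfig()
config.update_config({
    "model-name": "bert",                    # hypothetical
    "batch-size": 8,
    "measurement-mode": "count_windows",
    "latency-report-file": "latency.csv",
})

perf_analyzer = PerfAnalyzer(config=config)
perf_analyzer.run()             # retries with a larger window/request count on unstable measurements
print(perf_analyzer.output())   # stdout captured from the last run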
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Any, Dict, List, Union # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .task import DataObject class Configuration(DataObject): """ Configuration object - handle single experiment data """ def __init__( self, precision: str, format: str, batch_size: Union[str, List], accelerator: str, triton_gpu_engine_count: int, triton_max_queue_delay: int, capture_cuda_graph: int, checkpoint_variant: str, triton_preferred_batch_sizes: Union[str, List], **kwargs: Any, ): """ Args: precision: Target model precision format: Target conversion format batch_size: Batch sizes to evaluate accelerator: Triton Backend Accelerator triton_gpu_engine_count: Number of model instances triton_max_queue_delay: Maximal queue delay capture_cuda_graph: Triton Capture CUDA Graph optimization for tensorrt checkpoint_variant: Checkpoint used for configuration triton_preferred_batch_sizes: Preferred batch sizes **kwargs: Additional model arguments """ if isinstance(batch_size, str): batch_size = map(lambda item: int(item), batch_size.split(",")) if isinstance(triton_preferred_batch_sizes, str): triton_preferred_batch_sizes = map(lambda item: int(item), triton_preferred_batch_sizes.split(" ")) self.precision = precision self.format = format self.batch_size = sorted(batch_size) self.accelerator = accelerator self.triton_gpu_engine_count = triton_gpu_engine_count self.triton_max_queue_delay = triton_max_queue_delay self.capture_cuda_graph = capture_cuda_graph self.max_batch_size = max(self.batch_size) self.checkpoint_variant = checkpoint_variant self.triton_preferred_batch_sizes = " ".join(map(lambda i: str(i), sorted(triton_preferred_batch_sizes))) for key, value in kwargs.items(): self.__setattr__(key, value) @property def parameters(self) -> Dict: """ Return values stored in configuration Returns: Dictionary with configuration parameters """ return self.__dict__
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/configuration.py
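A sketch showing how string-valued batch sizes are normalized by the Configuration object; all field values are placeholders.

configuration = Configuration(
    precision="fp16",
    format="ts-script",
    batch_size="16,1,8",                    # comma-separated string is parsed and sorted
    accelerator="none",
    triton_gpu_engine_count=1,
    triton_max_queue_delay=1,
    capture_cuda_graph=0,
    checkpoint_variant="large-qa",          # hypothetical checkpoint name
    triton_preferred_batch_sizes="16 8",    # space-separated string
)
print(configuration.batch_size)                    # [1, 8, 16]
print(configuration.max_batch_size)                # 16
print(configuration.triton_preferred_batch_sizes)  # "8 16"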
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import pathlib from datetime import datetime from typing import Dict, List # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .config import Config from .configuration import Configuration from .downloader import download from .experiment import Experiment, Stage from .logger import LOGGER from .maintainer import Maintainer from .pipeline import Pipeline from .stages import ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage from .task import Checkpoint, Dataset, SystemInfo, Task from .triton import Triton from .utils import clean_directory class Preparer(abc.ABC): """ Runner preparer object. """ @abc.abstractmethod def exec( self, workspace: pathlib.Path, config: Config, pipeline: Pipeline, maintainer: Maintainer, triton: Triton, logs_dir: pathlib.Path, ): pass class ExperimentPreparer(Preparer): """ Experiment runner preparer object. """ def exec( self, workspace: pathlib.Path, config: Config, pipeline: Pipeline, maintainer: Maintainer, triton: Triton, logs_dir: pathlib.Path, ): LOGGER.info("Preparing Triton container image") triton_container_image = self._prepare_triton_container_image(config, maintainer, triton) LOGGER.info("Initialize task") task = self._initialize_task( workspace=workspace, config=config, pipeline=pipeline, triton_container_image=triton_container_image, logs_dir=logs_dir, ) LOGGER.info("Preparing directories") self._create_dirs(workspace, task) LOGGER.info("Clean previous run artifacts directories") self._clean_previous_run_artifacts(workspace, task) LOGGER.info("Downloading checkpoints") self._download_checkpoints(task) return task def _create_dirs(self, workspace: pathlib.Path, task: Task) -> None: """ Create directories used to store artifacts and final results Returns: None """ for directory in [task.results_dir, task.logs_dir, task.checkpoints_dir]: directory_path = workspace / directory directory_path.mkdir(parents=True, exist_ok=True) LOGGER.info(f"Directory {directory} created.") def _clean_previous_run_artifacts(self, workspace: pathlib.Path, task: Task) -> None: """ Clean logs from previous run Returns: None """ for directory in [ task.logs_dir, task.results_dir, ]: directory_path = workspace / directory clean_directory(directory_path) LOGGER.info(f"Location {directory} cleaned.") def _prepare_triton_container_image(self, config: Config, maintainer: Maintainer, triton: Triton) -> str: """ Prepare Triton Container Image based on provided configuration Returns: Name of container image to use in process """ if not config.triton_dockerfile: image_name = triton.container_image(config.container_version) LOGGER.info(f"Using official Triton container image: {image_name}.") return image_name if config.triton_container_image: LOGGER.info(f"Using provided Triton Container Image: {config.triton_container_image}") return 
config.triton_container_image normalized_model_name = config.model_name.lower().replace("_", "-") image_name = f"tritonserver-{normalized_model_name}:latest" LOGGER.info(f"Building Triton Container Image: {image_name}") maintainer.build_image( image_name=image_name, image_file_path=pathlib.Path(config.triton_dockerfile), build_args={"FROM_IMAGE": triton.container_image(container_version=config.container_version)}, ) return image_name def _download_checkpoints(self, task: Task) -> None: """ Download checkpoints """ for variant, checkpoint in task.checkpoints.items(): checkpoint_url = checkpoint.url download_path = checkpoint.path if download_path.is_dir(): LOGGER.info(f"Checkpoint {download_path.name} already downloaded.") continue if not checkpoint_url: LOGGER.warning( f"Checkpoint {variant} url is not provided." "\nIf you want to use that checkpoint please train the model locally" f"\nand copy to {download_path} directory" ) continue download(checkpoint_url, download_path) def _initialize_task( self, workspace: pathlib.Path, config: Config, pipeline: Pipeline, triton_container_image: str, logs_dir: pathlib.Path, ) -> Task: """ Initialize task object Args: workspace: Path to workspace where artifacts are stored config: Config object pipeline: Pipeline object triton_container_image: Triton Inference Server container image used for tests Returns: Task object """ datasets = {} for dataset in config.datasets: datasets[dataset.name] = Dataset(name=dataset.name) checkpoints = {} for checkpoint in config.checkpoints: download_path = workspace / Task.checkpoints_dir / checkpoint.name checkpoints[checkpoint.name] = Checkpoint(name=checkpoint.name, url=checkpoint.url, path=download_path) results_types = self._task_results_types(pipeline=pipeline) stages = dict() for stage in pipeline.stages(): stages[stage.label] = {"result_path": stage.result_path, "result_type": stage.result_type} experiments = list() for idx, configuration in enumerate(config.configurations, start=1): experiment = self._prepare_experiment( idx=idx, configuration=configuration, results_types=results_types, stages=stages, ) experiments.append(experiment) system_info = SystemInfo.from_host() task = Task( model_name=config.model_name, framework=config.framework, checkpoints=checkpoints, datasets=datasets, datasets_dir=config.datasets_dir, experiments=experiments, container_version=config.container_version, system_info=system_info, triton_container_image=triton_container_image, triton_custom_operations=config.triton_custom_operations, triton_load_model_method=config.triton_load_model_method, started_at=int(datetime.utcnow().timestamp()), logs_dir=logs_dir, ) return task def _task_results_types(self, pipeline: Pipeline) -> List[str]: """ Types of results generated as part of task Returns: List of result types """ results = list() for stage in pipeline.stages(): if TritonPerformanceOfflineStage.label == stage.label: results.append(ResultsType.TRITON_PERFORMANCE_OFFLINE) continue if TritonPerformanceOnlineStage.label == stage.label: results.append(ResultsType.TRITON_PERFORMANCE_ONLINE) continue return results def _prepare_experiment( self, idx: int, configuration: Configuration, results_types: List[str], stages: Dict, ) -> Experiment: """ Prepare experiments data Args: idx: Experiment index configuration: Configuration object results_types: Results types stored in experiment stages: Stages executed as part of experiment Returns: Experiment object """ parameters = {key.lower(): value for key, value in configuration.parameters.items()} 
results_mapped = dict() for result_type in results_types: results_mapped[result_type] = result_type stages_mapped = dict() for name, stage_data in stages.items(): stages_mapped[name] = Stage(name=name, **stage_data) experiment = Experiment( experiment_id=idx, parameters=parameters, stages=stages_mapped, results=results_mapped, ) return experiment
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/preparer.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib import platform import subprocess from datetime import datetime from typing import Dict, List, Optional, Union import cpuinfo import psutil import yaml # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .core import CustomDumper, DataObject from .experiment import Experiment from .triton import Triton class GPU(DataObject): """ GPU information data object """ name: str driver_version: str cuda_version: str memory: str tdp: str def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str): """ Args: name: name of GPU driver_version: version of driver cuda_version: version of CUDA memory: size of memory available on GPU [MB] tdp: Max TDP of GPU unit """ self.name = name self.driver_version = driver_version self.cuda_version = cuda_version self.memory = memory self.tdp = tdp @staticmethod def from_dict(data: Dict): """ Create GPU object from dictionary Args: data: dictionary with GPU data Returns: GPU object """ return GPU( name=data["name"], driver_version=data["driver_version"], cuda_version=data["cuda_version"], memory=data["memory"], tdp=data["tdp"], ) @staticmethod def from_host(): """ Create GPU object from host data Returns: GPU object """ data = subprocess.check_output( ["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"] ).decode() lines = data.split(sep="\n") device_details = lines[1].split(",") name = device_details[0].strip() driver_version = device_details[1].strip() memory = device_details[2].strip() tdp = device_details[3].strip() cuda_version = None data = subprocess.check_output(["nvidia-smi", "--query"]).decode() lines = data.split(sep="\n") for line in lines: if line.startswith("CUDA Version"): cuda_version = line.split(":")[1].strip() break return GPU( name=name, driver_version=driver_version, cuda_version=cuda_version, memory=memory, tdp=tdp, ) class CPU(DataObject): """ CPU details """ name: str physical_cores: int logical_cores: int min_frequency: float max_frequency: float def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float): """ Args: name: name of CPU unit physical_cores: number of physical cores available on CPU logical_cores: number of logical cores available on CPU min_frequency: minimal clock frequency max_frequency: maximal clock frequency """ self.name = name self.physical_cores = physical_cores self.logical_cores = logical_cores self.min_frequency = min_frequency self.max_frequency = max_frequency @staticmethod def from_host(): """ Create CPU object from host data Returns: CPU object """ return CPU( name=cpuinfo.get_cpu_info()["brand_raw"], physical_cores=psutil.cpu_count(logical=False), logical_cores=psutil.cpu_count(logical=True), min_frequency=psutil.cpu_freq().min, 
max_frequency=psutil.cpu_freq().max, ) class Memory(DataObject): """ Memory data object """ size: float def __init__(self, size: float): """ Args: size: RAM memory size in MB """ self.size = size @staticmethod def from_host(): """ Create Memory object from host data Returns: Memory object """ svm = psutil.virtual_memory() return Memory(size=svm.total) class SystemInfo(DataObject): """ System Information data object """ system: str cpu: CPU memory: Memory gpu: GPU def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU): """ Args: system: name of operating system cpu: CPU info memory: Memory info gpu: GPU info """ self.system = system self.cpu = cpu self.memory = memory self.gpu = gpu @staticmethod def from_host(): """ Create SystemInfo object from host data Returns: SystemInfo object """ system = platform.platform() gpu = GPU.from_host() memory = Memory.from_host() cpu = CPU.from_host() return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory) class Checkpoint(DataObject): """ Checkpoint data object """ def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]): """ Args: name: Name of checkpoint path: Location of checkpoint on local hardware """ self.name = name self.url = url self.path = pathlib.Path(path) class Dataset(DataObject): """ Dataset data object """ def __init__(self, name: str): """ Args: name: Name of dataset """ self.name = name class Task(DataObject): """ Task data object to store build information """ model_name: str framework: str started_at: int ended_at: Optional[int] container_version: str checkpoints: Dict[str, Checkpoint] datasets: Dict[str, Dataset] datasets_dir: Optional[Union[str, pathlib.Path]] experiments: List[Experiment] system_info: SystemInfo triton_container_image: Optional[str] triton_custom_operations: Optional[str] filename: str = "task.yaml" results_dir: str = "results" checkpoints_dir: str = "checkpoints" def __init__( self, model_name: str, framework: str, container_version: str, checkpoints: Dict, datasets: Dict, experiments: List, system_info: SystemInfo, started_at: int, logs_dir: pathlib.Path = pathlib.Path("/var/logs"), datasets_dir: Optional[Union[str, pathlib.Path]] = None, ended_at: Optional[int] = None, triton_container_image: Optional[str] = None, triton_custom_operations: Optional[str] = None, triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT, ): """ Args: model_name: Name of model framework: Model framework container_version: Container version used in task checkpoints: List of checkpoints datasets: List of datasets datasets_dir: Directory where datasests are stored experiments: List of experiments run as part of task system_info: information about node on which experiment was executed started_at: Time when task has started ended_at: Time when task has ended triton_container_image: Custom Triton Container Image used for task triton_custom_operations: Custom operations library path triton_load_model_method: Method how models are loaded on Triton """ self.started_at = started_at self.ended_at = ended_at self.model_name = model_name self.framework = framework self.container_version = container_version self.checkpoints = checkpoints self.datasets = datasets self.datasets_dir = pathlib.Path(datasets_dir) self.experiments = experiments self.system_info = system_info self.triton_container_image = triton_container_image self.triton_custom_operations = triton_custom_operations self.triton_load_model_method = triton_load_model_method self.logs_dir = logs_dir def start(self) -> None: """ Update stage execution 
info at start Returns: None """ self.started_at = int(datetime.utcnow().timestamp()) def end(self) -> None: """ Update stage execution info at end Returns: None """ self.ended_at = int(datetime.utcnow().timestamp()) def to_file(self, file_path: Union[pathlib.Path, str]): """ Store task data to YAML file Args: file_path: path to file where task data has to be saved Returns: None """ task_data = self.to_dict() with open(file_path, "w") as f: yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/task.py
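A small sketch of the host-introspection helpers used by Task when it records the environment; it assumes a machine with nvidia-smi, psutil and py-cpuinfo available, and the checkpoint values are placeholders.

system_info = SystemInfo.from_host()
print(system_info.to_dict())    # nested dict with system, cpu, memory and gpu sections

checkpoint = Checkpoint(name="large-qa", url="", path="checkpoints/large-qa")  # hypothetical
print(checkpoint.to_dict()["path"])   # "checkpoints/large-qa"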
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pathlib import signal import sys from typing import List, Type # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .config import Config from .exceptions import RunnerException from .executor import Executor from .finalizer import Finalizer from .logger import LOGGER, log_format from .maintainer import Maintainer from .pipeline import Pipeline from .preparer import Preparer from .triton import Triton class Runner: """ Runner class. Main entrypoint to performing task and experiments """ WORKSPACE = pathlib.Path.cwd() EXECUTOR_WORKSPACE = WORKSPACE / "runner_workspace" def __init__( self, pipeline: Pipeline, config: Config, executor_cls: Type[Executor], maintainer_cls: Type[Maintainer], preparer_cls: Type[Preparer], finalizer_cls: Type[Finalizer], devices: List[str] = None, log_level: int = logging.INFO, ): self._pipeline = pipeline self._config = config self._pipeline = pipeline self._config = config self._preparer = preparer_cls() self._finalizer = finalizer_cls() self._devices = devices or ["0"] self._log_level = log_level self._logs_dir = self.EXECUTOR_WORKSPACE / "logs" self._log_file_path = self._logs_dir / "runner.log" self._maintainer = maintainer_cls() self._executor = executor_cls( workspace=self.EXECUTOR_WORKSPACE, maintainer=self._maintainer, pipeline=pipeline, devices=devices, ) signal.signal(signal.SIGINT, self._catch) self._logs_dir.mkdir(parents=True, exist_ok=True) def start(self) -> None: """ Start runner Returns: None """ self._setup_logger() task = self._preparer.exec( workspace=self.EXECUTOR_WORKSPACE, config=self._config, pipeline=self._pipeline, logs_dir=self._logs_dir, maintainer=self._maintainer, triton=Triton(), ) results = [] try: for result in self._executor.start(task): results.append(result) except RunnerException as e: LOGGER.error(f"Error running task: {str(e)}") finally: self._executor.stop() self._finalizer.exec(workspace=self.EXECUTOR_WORKSPACE, task=task, results=results) def _catch(self, signum, frame): """ SIGINT catcher. Stops executor on any sigterm. Args: signum: signal id frame: signal frame """ self._executor.stop() sys.exit(0) def _setup_logger(self) -> None: """ Add file handle for logger Returns: None """ file = logging.FileHandler(self._log_file_path) formatter = logging.Formatter(log_format) file.setFormatter(formatter) LOGGER.addHandler(file) LOGGER.setLevel(level=self._log_level) LOGGER.initialize(file_path=self._log_file_path)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/runner.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .core import Framework, Paths class Triton: """ Triton Inference Server helper class """ image = "nvcr.io/nvidia/tritonserver" tag = "py3" class LOAD_MODE: """ Loading mode available in Triton """ POLL = "poll" EXPLICIT = "explicit" @staticmethod def container_image(container_version: str): """ Container image based on version Args: container_version: Version of container to be used Returns: Image name with tag """ return f"{Triton.image}:{container_version}-{Triton.tag}" @staticmethod def command( framework: str, repository_path: str, strict_mode: bool = False, poll_model: bool = False, metrics: bool = False, verbose: bool = False, ): """ Command to run Triton Inference Server inside container Args: framework: Framework used for model repository_path: Path to model repository strict_mode: Flag to use strict model config poll_model: Poll model metrics: Enable GPU metrics (disable for MIG) verbose: Use verbose mode logging Returns: """ triton_command = f"tritonserver --model-store={repository_path}" if poll_model: triton_command += " --model-control-mode=poll --repository-poll-secs 5" else: triton_command += " --model-control-mode=explicit" if not strict_mode: triton_command += " --strict-model-config=false" if not metrics: triton_command += " --allow-metrics=false --allow-gpu-metrics=false" if verbose: triton_command += " --log-verbose 1" if framework in (Framework.TensorFlow1, Framework.TensorFlow2): version = 1 if framework == Framework.TensorFlow1 else 2 triton_command += f" --backend-config=tensorflow,version={version}" return triton_command @staticmethod def library_path(framework: str): """ Obtain custom library path for framework Args: framework: Framework used for model Returns: Path to additional libraries needed by framework """ paths = { Framework.PyTorch.name: "/opt/tritonserver/backends/pytorch", Framework.TensorFlow1.name: "/opt/tritonserver/backends/tensorflow1", Framework.TensorFlow2.name: "/opt/tritonserver/backends/tensorflow2", } return paths[framework] @staticmethod def custom_library_path_remote() -> str: """ Path to custom library mounted in Triton container Returns: Path to shared library with custom operations """ return f"{Paths.LIBRARIES_PATH}/libcustomops.so" @staticmethod def custom_library_path_local(libs_dir: pathlib.Path) -> pathlib.Path: """ Path to custom library in local path Args: libs_dir: path to libraries directory Returns: Path to shared library with custom operations """ return libs_dir / "libcustomops.so"
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/triton.py
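A sketch of the helper's output, assuming `Triton` and `Framework` are imported from the runner package; the container version is a placeholder.

print(Triton.container_image("21.02"))
# nvcr.io/nvidia/tritonserver:21.02-py3

print(Triton.command(framework=Framework.PyTorch.name, repository_path="/mnt/triton-models", verbose=True))
# tritonserver --model-store=/mnt/triton-models --model-control-mode=explicit
#   --strict-model-config=false --allow-metrics=false --allow-gpu-metrics=false --log-verbose 1

print(Triton.library_path(Framework.PyTorch.name))
# /opt/tritonserver/backends/pytorch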
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Dict, List, Optional, Union import yaml if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .configuration import Configuration from .core import DataObject from .triton import Triton class Checkpoint(DataObject): """ Checkpoint data placeholder """ name: str url: str def __init__(self, name: str, url: str): self.name = name self.url = url class Dataset(DataObject): """ Dataset data placeholder """ name: str def __init__(self, name: str): self.name = name class Config(DataObject): """ Configuration object for runner experiments """ def __init__( self, model_name: str, framework: str, container_version: str, configurations: List[Configuration], datasets_dir: str = "datasets", datasets: List[Dataset] = None, checkpoints: List[Checkpoint] = None, triton_dockerfile: Optional[str] = None, triton_container_image: Optional[str] = None, triton_custom_operations: Optional[str] = None, triton_load_model_method: Optional[str] = Triton.LOAD_MODE.EXPLICIT, ): """ Args: model_name: Name of model framework: Framework used to create model container_version: Version of Triton Inference Server container used for evaluation configurations: List of experiments configurations datasets_dir: Directory where datasets are stored datasets: Datasets used for conversion/export checkpoints: Checkpoints with trained model triton_load_model_method: Triton Inference Server model loading mode triton_dockerfile: Dockerfile for Triton to build custom image triton_container_image: Custom image used for Triton Server - leave empty to use default or built from Dockerfile triton_custom_operations: Path where custom operation library is stored """ self.model_name = model_name self.framework = framework self.container_version = container_version self.configurations = configurations self.datasets_dir = datasets_dir self.datasets = datasets self.checkpoints = checkpoints self.triton_load_model_method = triton_load_model_method self.triton_dockerfile = triton_dockerfile self.triton_container_image = triton_container_image self.triton_custom_operations = triton_custom_operations def to_file(self, file_path: Union[pathlib.Path, str]) -> None: """ Save config data to file Args: file_path: path to file where config data is should be stored Returns: None """ data = self.to_dict() with open(file_path, "w") as f: yaml.safe_dump(data, f) @staticmethod def from_dict(config_data: Dict): """ Create configuration object from data stored in dictionary Args: config_data: dictionary with config data Returns: Config object """ configurations = [] for configuration_data in config_data["configurations"]: configuration = Configuration(**configuration_data) configurations.append(configuration) checkpoints = [] for checkpoint_data in config_data.get("checkpoints", []): checkpoint = Checkpoint( name=checkpoint_data["name"], url=checkpoint_data["url"], ) 
checkpoints.append(checkpoint) datasets = [] for dataset_data in config_data.get("datasets", []): dataset = Dataset(name=dataset_data["name"]) datasets.append(dataset) return Config( model_name=config_data["model_name"], framework=config_data["framework"], container_version=config_data["container_version"], configurations=configurations, checkpoints=checkpoints, datasets=datasets, datasets_dir=config_data.get("datasets_dir"), triton_load_model_method=config_data["triton_load_model_method"], triton_dockerfile=config_data.get("triton_dockerfile"), triton_container_image=config_data.get("triton_container_image"), triton_custom_operations=config_data.get("triton_custom_operations"), ) @staticmethod def from_file(file_path: Union[pathlib.Path, str]): """ Load experiment data from file Args: file_path: path to file where experiment data is stored Returns: Experiment object """ with open(file_path, "r") as f: config_data = yaml.safe_load(f) return Config.from_dict(config_data)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/config.py
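A sketch of a minimal runner configuration file and how it is loaded; the YAML values are placeholders, but the keys match what `Config.from_dict` expects.

# config.yaml (hypothetical):
#   model_name: BERT
#   framework: PyTorch
#   container_version: "21.02"
#   triton_load_model_method: explicit
#   configurations:
#     - precision: fp16
#       format: ts-script
#       batch_size: "1,8"
#       accelerator: none
#       triton_gpu_engine_count: 1
#       triton_max_queue_delay: 1
#       capture_cuda_graph: 0
#       checkpoint_variant: large-qa
#       triton_preferred_batch_sizes: "1 8"

config = Config.from_file("config.yaml")
print(config.model_name, len(config.configurations))   # BERT 1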
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib import shutil import urllib.request from typing import Any, Callable from zipfile import ZipFile from retrying import retry from tqdm.auto import tqdm # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .logger import LOGGER from .exceptions import RunnerException def unzip(checkpoint_path: pathlib.Path, archive_path: pathlib.Path) -> None: """ Unzip acrhive to provided path Args: checkpoint_path: Path where archive has to be unpacked archive_path: Path to archive Archive filename Returns: None """ LOGGER.info(f"Creating directory for checkpoint: {checkpoint_path.name}") checkpoint_path.mkdir(parents=True, exist_ok=True) LOGGER.info(f"Unpacking checkpoint files {checkpoint_path}") with ZipFile(archive_path, "r") as zf: zf.extractall(path=checkpoint_path) LOGGER.info("done") LOGGER.info(f"Removing zip file: {archive_path}") archive_path.unlink() LOGGER.info("done") def download_progress(t: Any) -> Callable: """ Progress bar Args: t: progress Returns: Callable """ last_b = [0] def update_to(b: int = 1, bsize: int = 1, tsize: int = None): if tsize not in (None, -1): t.total = tsize t.update((b - last_b[0]) * bsize) last_b[0] = b return update_to @retry(stop_max_attempt_number=3) def download(checkpoint_url: str, checkpoint_path: pathlib.Path) -> None: """ Download checkpoint from given url to provided path Args: checkpoint_url: Url from which checkpoint has to be downloaded checkpoint_path: Path where checkpoint has to be stored Returns: None """ LOGGER.info(f"Downloading checkpoint from {checkpoint_url}") with tqdm(unit="B") as t: reporthook = download_progress(t) result = urllib.request.urlretrieve(checkpoint_url, reporthook=reporthook) filename = result[0] LOGGER.info(f"Checkpoint saved in {filename}") file_path = pathlib.Path(filename) if not file_path.is_file() and not file_path.is_dir(): raise RunnerException(f"Checkpoint {filename} does not exist") LOGGER.info(f"Moving checkpoint to {checkpoint_path.parent}") shutil.move(file_path, checkpoint_path.parent / file_path.name) LOGGER.info("done") archive_path = checkpoint_path.parent / file_path.name unzip(checkpoint_path, archive_path)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/downloader.py
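A sketch of fetching a zipped checkpoint with the downloader; the URL and target path are placeholders.

import pathlib

download(
    checkpoint_url="https://example.com/checkpoints/large-qa.zip",   # placeholder URL
    checkpoint_path=pathlib.Path("runner_workspace/checkpoints/large-qa"),
)
# Downloads the archive (with up to 3 retries), moves it next to the target directory
# and unpacks it into runner_workspace/checkpoints/large-qa.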
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import pathlib import shutil from typing import Dict, List import yaml # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .experiment import ExperimentResult from .logger import LOGGER from .stages import ResultsType from .summary import load_results, save_summary from .task import Task class Finalizer(abc.ABC): @abc.abstractmethod def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]): pass class ExperimentFinalizer(Finalizer): """ Public runner finalizer object. """ def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]): results_path = workspace / task.results_dir self._generate_summary(results_path, results) self._finalize_task(results_path, task) def _finalize_task(self, results_path: pathlib.Path, task: Task) -> None: """ Finalize task information Args: task: Task object Returns: None """ task.end() file_path = results_path / task.filename LOGGER.debug(f"Saving task details to file {file_path}") task.to_file(file_path) LOGGER.debug("Done") LOGGER.info(f"Task details and results stored in {results_path}") def _generate_summary(self, results_path: pathlib.Path, experiment_results: List[ExperimentResult]): """ Generate summary for results collected in all experiments Args: results_path: Path where results should be stored experiment_results: Results collected from experiments Returns: """ performance_offline_results = list() performance_online_results = list() results_mapping = { ResultsType.TRITON_PERFORMANCE_OFFLINE: performance_offline_results, ResultsType.TRITON_PERFORMANCE_ONLINE: performance_online_results, } self._collect_summary_results(experiment_results, results_mapping) self._prepare_final_results(results_path, results_mapping) def _collect_summary_results(self, experiment_results: List[ExperimentResult], results_mapping: Dict): for experiment_result in experiment_results: experiment = experiment_result.experiment for result_type, result_path in experiment_result.results.items(): if not result_path.is_file() and not result_path.is_dir(): raise FileNotFoundError(f"Expected file {result_path} not found") LOGGER.debug(f"Found {result_type} in {result_path} file.") if result_type not in results_mapping: LOGGER.debug(f"Results {result_type} for {experiment.experiment_id} are ignored in final summary.") return LOGGER.debug(f"Collecting {result_type} results from {result_path} for summary") result = load_results( results_path=result_path, parameters=experiment.parameters, result_type=result_type, ) results_mapping[result_type].extend(result) LOGGER.debug(f"Done.") def _prepare_final_results(self, results_path: pathlib.Path, results_mapping: Dict) -> None: """ Prepare summary files for offline and online performance Args: results_path: Path where results should be stored results_mapping: Mapping with 
results type and collected results for given stage Returns: None """ for results_type, results in results_mapping.items(): save_summary( result_type=results_type, results=results, summary_dir=results_path, )
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/finalizer.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pathlib # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .core import Command from .exceptions import RunnerException from .stages import Stage class CommandsExporter: """ Command exported to BASH scripts """ def __init__(self, scripts_dir: pathlib.Path): """ Args: scripts_dir: Paths where scripts should be stored """ self._scripts_dir = scripts_dir def export(self, stage: Stage) -> Command: """ Export stage commands to script and return new command to execute Args: stage: Stage object with commands Returns: Command object with script execution command """ filename = self._get_filename(stage.label) file_path = self._scripts_dir / filename with open(file_path, "w+") as stagefile: stagefile.write("set -x\n") stagefile.write("set -e\n") stagefile.write("export PYTHONUNBUFFERED=1\n") stagefile.write("export PYTHONPATH=`pwd`\n") for command in stage.commands: stagefile.write(str(command)) result = os.system(f'ex +"set syn=sh" +"norm gg=G" -cwq {file_path}') if result != 0: raise RunnerException(f"Failed running {filename} script formatting. Exit code {result}") command = Command(f"bash -xe {file_path.as_posix()}") return command def _get_filename(self, label: str): """ Generate filename for script based on label Args: label: String with stage label Returns: String with script filename """ filename = label.replace(" ", "_").lower() filename = f"{filename}.sh" return filename
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/exporter.py
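A sketch of exporting a stage to a bash script; it assumes the `ex` editor used for formatting is installed (e.g. via vim) and reuses the `DeployStage` class defined in stages.py below, with a placeholder command.

import pathlib

scripts_dir = pathlib.Path("runner_workspace/scripts")
scripts_dir.mkdir(parents=True, exist_ok=True)

exporter = CommandsExporter(scripts_dir=scripts_dir)
stage = DeployStage(commands=["echo deploy model here"])   # placeholder command
command = exporter.export(stage)
print(command)   # bash -xe runner_workspace/scripts/deploy_model.sh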
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from enum import Enum from typing import Any, Dict, List import yaml class CustomDumper(yaml.Dumper): """ Custom YAML dumper to avoid craeting aliases """ def ignore_aliases(self, data: Dict) -> bool: return True class Paths: """ Paths mapping inside Triton Container """ MODEL_REPOSITORY_PATH = "/mnt/triton-models" LIBRARIES_PATH = "/mnt/libs" class Framework(Enum): """ Supported frameworks """ TensorFlow1 = "TensorFlow1" TensorFlow2 = "TensorFlow2" PyTorch = "PyTorch" class Command: """Represents wrapper of raw string command""" def __init__(self, data: str): """ Store command data Args: data: string with bash commands to execute """ self._data = data def __str__(self) -> str: """ String object representation Returns: String """ return self._data class DataObject(object): """ Data object representation handling recursive transformation from object to dict """ READ_ONLY = set() def to_dict(self) -> Dict: """ Represent object as dictionary Returns: Dict """ data = dict() filtered_data = {key: value for key, value in self.__dict__.items() if key not in self.READ_ONLY} for key, value in filtered_data.items(): data[key] = self._convert_value(value) return data def _convert_value(self, value: Any) -> Any: """ Convert value based on its type Args: value: variable to convert Returns: Converted object """ if isinstance(value, DataObject): value = value.to_dict() elif isinstance(value, dict): value = self._from_dict(value) elif isinstance(value, list): value = self._from_list(value) elif isinstance(value, Enum): value = value.value elif isinstance(value, pathlib.Path): value = value.as_posix() return value def _from_dict(self, values: Dict) -> Any: """ Convert dictionary values Args: values: dictionary with values Returns: Any """ data = dict() for key, value in values.items(): data[key] = self._convert_value(value) return data def _from_list(self, values: List) -> Any: """ Convert list of values Args: values: list with values Returns: Any """ items = list() for value in values: item = self._convert_value(value) items.append(item) return items AVAILABLE_FRAMEWORKS = [f.value for f in Framework]
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/core.py
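A short, hypothetical sketch of how DataObject.to_dict recursively converts nested objects, enums, and paths; the ModelConfig class below is invented purely for illustration.

# Hypothetical sketch; ModelConfig is not part of the repository.
import pathlib

from runner.core import DataObject, Framework


class ModelConfig(DataObject):
    def __init__(self, name: str, framework: Framework, checkpoint: pathlib.Path):
        self.name = name
        self.framework = framework
        self.checkpoint = checkpoint


config = ModelConfig(
    name="bert-large",
    framework=Framework.PyTorch,
    checkpoint=pathlib.Path("checkpoints/bert.pt"),
)
print(config.to_dict())
# {'name': 'bert-large', 'framework': 'PyTorch', 'checkpoint': 'checkpoints/bert.pt'}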
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib

import coloredlogs


class Logger(logging.Logger):
    def __init__(self, name, level=logging.NOTSET):
        super().__init__(name, level=level)
        self._file_path = None

    def initialize(self, file_path: pathlib.Path):
        self._file_path = file_path

    def write(self, log: str):
        if not self._file_path:
            return

        with open(self._file_path, "+a") as file:
            file.write(log)


LOGGER = Logger("runner")

log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(format=log_format)
coloredlogs.install(
    level=logging.INFO,
    fmt=log_format,
    logger=LOGGER,
    field_styles={
        "asctime": {"color": "green"},
        "hostname": {"color": "magenta"},
        "levelname": {"bold": True, "color": "blue"},
        "name": {"color": "blue"},
        "programname": {"color": "cyan"},
        "username": {"color": "yellow"},
    },
    reconfigure=True,
)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/logger.py
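A brief, hypothetical sketch of the two roles the custom Logger plays: standard colored console logging, plus verbatim mirroring of raw output into a file once initialize has been called. The log file name is made up.

# Hypothetical sketch; "runner.log" is a placeholder path.
import pathlib

from runner.logger import LOGGER

LOGGER.initialize(file_path=pathlib.Path("runner.log"))
LOGGER.info("Starting experiments")            # colored console output via coloredlogs
LOGGER.write("raw subprocess output line\n")   # appended verbatim to runner.log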
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Type

# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .config import Config
from .executor import Executor
from .finalizer import Finalizer
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .runner import Runner


class RunnerProxy:
    """
    Runner proxy to configure original runner
    """

    maintainer_cls: Type[Maintainer] = None
    executor_cls: Type[Executor] = None
    preparer_cls: Type[Preparer] = None
    finalizer_cls: Type[Finalizer] = None

    def __init__(self, config: Config, pipeline: Pipeline, devices: List[str]):
        """
        RunnerProxy constructor

        Args:
            config: Config object
            pipeline: Pipeline to evaluate
            devices: List of devices to use for tests
        """
        self._runner = Runner(
            config=config,
            pipeline=pipeline,
            devices=devices,
            maintainer_cls=self.maintainer_cls,
            executor_cls=self.executor_cls,
            preparer_cls=self.preparer_cls,
            finalizer_cls=self.finalizer_cls,
        )

    def start(self) -> None:
        """
        Runner interface
        """
        self._runner.start()
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/runner_proxy.py
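A hypothetical specialization sketch: RunnerProxy is configured by pinning the component classes as class attributes. The concrete Maintainer/Executor/Preparer/Finalizer implementations, and the Config and Pipeline objects needed to instantiate the runner, are assumed to be provided elsewhere in the runner package.

# Hypothetical sketch; assumes usable component classes are exposed by these modules.
from runner.executor import Executor
from runner.finalizer import Finalizer
from runner.maintainer import Maintainer
from runner.preparer import Preparer
from runner.runner_proxy import RunnerProxy


class BertRunner(RunnerProxy):
    """Runner proxy preconfigured with a fixed set of components."""

    maintainer_cls = Maintainer
    executor_cls = Executor
    preparer_cls = Preparer
    finalizer_cls = Finalizer

# BertRunner(config=<Config>, pipeline=<Pipeline>, devices=["0"]).start() would then drive the experiments.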
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Optional, Tuple, Union

# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .core import Command


class ResultsType:
    """
    Results types generated by runner
    """

    TRITON_PERFORMANCE_OFFLINE = "triton_performance_offline"
    TRITON_PERFORMANCE_ONLINE = "triton_performance_online"


class Stage:
    """
    Stage definition
    """

    label: str
    commands: List[Command]
    result_path: Optional[str]
    result_type: Optional[str]

    def __init__(
        self,
        commands: Union[Tuple[str, ...], List[str]],
        result_path: Optional[str] = None,
        result_type: Optional[str] = None,
    ):
        """
        Args:
            commands: List or Tuple of commands provided as raw string
            result_path: Path to results file generated by stage
            result_type: Type of results generated by stage
        """
        if type(commands) not in [tuple, list]:
            raise ValueError("""Incorrect type of commands list. Please, provide list of commands as tuple.""")

        self.commands = list(map(lambda command: Command(data=command), commands))
        self.result_path = result_path
        self.result_type = result_type


class ExportStage(Stage):
    label = "Export Model"


class ConversionStage(Stage):
    label = "Convert Model"


class DeployStage(Stage):
    label = "Deploy Model"


class CorrectnessStage(Stage):
    label = "Model Correctness Tests"


class TritonPreparePerformanceProfilingDataStage(Stage):
    label = "Prepare Triton Profiling Data"


class TritonPerformanceOfflineStage(Stage):
    label = "Triton Performance Offline Tests"


class TritonPerformanceOnlineStage(Stage):
    label = "Triton Performance Online Tests"
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/stages.py
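A small, hypothetical sketch of defining a performance stage; the command string and result path below are placeholders written in the style the runner uses.

# Hypothetical sketch; the command and result path are placeholders.
from runner.stages import ResultsType, TritonPerformanceOfflineStage

stage = TritonPerformanceOfflineStage(
    commands=("echo 'run offline performance test here'\n",),
    result_path="${SHARED_DIR}/triton_performance_offline.csv",
    result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
)
print(stage.label)  # "Triton Performance Offline Tests"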
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from datetime import datetime
from typing import Any, Dict, Optional

# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .core import DataObject


class ExperimentStatus(object):
    """
    Experiment status flags object
    """

    SUCCEED = "Succeed"
    FAILED = "Failed"


class StageStatus:
    """
    Stages status flags object
    """

    SUCCEED = "Succeed"
    FAILED = "Failed"


class Stage(DataObject):
    """
    Stage data object
    """

    name: str
    status: str
    started_at: Optional[int]
    ended_at: Optional[int]
    result_path: Optional[str]
    result_type: Optional[str]

    def __init__(
        self,
        name: str,
        result_path: Optional[str],
        result_type: Optional[str],
        status: str = StageStatus.FAILED,
        started_at: Optional[int] = None,
        ended_at: Optional[int] = None,
    ):
        """
        Args:
            name: name of stage
            result_path: path where results file is stored
            result_type: type of results
            status: success/fail status
            started_at: time when stage has started
            ended_at: time when stage has ended
        """
        self.name = name
        self.status = status
        self.started_at = started_at
        self.ended_at = ended_at
        self.result_path = result_path
        self.result_type = result_type

    def start(self) -> None:
        """
        Update stage execution info at start

        Returns:
            None
        """
        self.started_at = int(datetime.utcnow().timestamp())

    def end(self) -> None:
        """
        Update stage execution info at end

        Returns:
            None
        """
        self.status = StageStatus.SUCCEED
        self.ended_at = int(datetime.utcnow().timestamp())


class Experiment(DataObject):
    """
    Experiment data object
    """

    experiment_id: int
    parameters: Dict
    stages: Dict[str, Stage]
    results: Dict[str, str]
    status: str
    started_at: Optional[int]
    ended_at: Optional[int]

    def __init__(
        self,
        experiment_id: int,
        parameters: Dict,
        stages: Dict[str, Stage],
        results: Dict[str, str],
        started_at: Optional[int] = None,
        ended_at: Optional[int] = None,
        status: str = ExperimentStatus.FAILED,
    ):
        """
        Args:
            experiment_id: experiment identifier
            parameters: dictionary with experiment configuration
            stages: dictionary with stages run in experiment
            results: mapping between results types and location where they are stored
            started_at: time when experiment has started
            ended_at: time when experiment has ended
            status: experiment success/fail information
        """
        self.experiment_id = experiment_id
        self.started_at = started_at
        self.ended_at = ended_at
        self.parameters = parameters
        self.stages = stages
        self.status = status
        self.results = results
        self.results_dir = f"experiment_{experiment_id}"

    def start(self) -> None:
        """
        Update experiment execution info at start

        Returns:
            None
        """
        self.started_at = int(datetime.utcnow().timestamp())

    def end(self) -> None:
        """
        Update experiment execution info at end

        Returns:
            None
        """
        self.status = ExperimentStatus.SUCCEED
        self.ended_at = int(datetime.utcnow().timestamp())


@dataclasses.dataclass
class Status:
    state: ExperimentStatus
    message: str


@dataclasses.dataclass
class ExperimentResult:
    """
    Experiment result object
    """

    status: Status
    experiment: Experiment
    results: Dict[str, pathlib.Path]
    payload: Dict[str, Any] = dataclasses.field(default_factory=dict)
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/experiment.py
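A hypothetical lifecycle sketch for the data objects above; the parameters and the single stage are made up for illustration.

# Hypothetical sketch; experiment parameters and stages are placeholders.
from runner.experiment import Experiment, Stage

stages = {
    "Export Model": Stage(name="Export Model", result_path=None, result_type=None),
}

experiment = Experiment(
    experiment_id=1,
    parameters={"precision": "fp16", "max_batch_size": 16},
    stages=stages,
    results={},
)

experiment.start()
stages["Export Model"].start()
# ... the stage commands would run here ...
stages["Export Model"].end()
experiment.end()

print(experiment.to_dict()["status"])  # "Succeed"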
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import pathlib
from typing import Dict, List, Union

# method from PEP-366 to support relative import in executed modules
import yaml

if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from ..deployment_toolkit.report import save_results, sort_results
from .logger import LOGGER


def save_summary(result_type: str, results: List, summary_dir: pathlib.Path) -> None:
    """
    Create file with summary for results of given type

    Args:
        result_type: Type of results to dump
        results: Results data
        summary_dir: Path where results should be stored

    Returns:
        None
    """
    if len(results) == 0:
        LOGGER.warning(f"No {result_type} results found.")
        return

    results = sort_results(results=results)

    kind_file = summary_dir / f"{result_type}_summary.csv"
    save_results(filename=kind_file.as_posix(), data=results, formatted=True)
    LOGGER.info(f"Summary for {result_type} stored in {kind_file}")


def load_results(*, results_path: Union[pathlib.Path, str], result_type: str, parameters: Dict) -> List:
    """
    Load results

    Args:
        results_path: Path to file or directory from which data should be read
        result_type: type of results
        parameters: Parameters used in experiment which generated results

    Returns:
        List of result rows
    """
    LOGGER.debug(f"Loading {result_type} from {results_path} for summary")
    results_path = pathlib.Path(results_path)

    if results_path.is_file():
        files = [results_path]
    elif results_path.is_dir():
        files = list(results_path.iterdir())
    else:
        LOGGER.debug(f"Unable to load file: {results_path}. Generating empty rows.")
        data = [{}]
        return data

    if any([file.name.endswith(".ckpt") for file in files]):
        model_analyzer_metrics = results_path / "metrics-model-inference.csv"
        files = [model_analyzer_metrics]
    else:
        files = [file for file in files if file.name.endswith(".csv")]

    results = list()
    parameters_cpy = {key: value for key, value in parameters.items() if key != "batch"}
    for file in files:
        if file.suffix == ".csv":
            data = _generate_data_from_csv(file=file)
        elif file.suffix == ".json":
            data = _generate_data_from_json(file=file)
        elif file.suffix == ".yaml":
            data = _generate_data_from_yaml(file=file)
        else:
            raise ValueError(f"Unsupported file extension: {file.suffix}")

        for item in data:
            result = {**parameters_cpy, **item}
            results.append(result)

    LOGGER.debug(f"Loading done. Collected {len(results)} results.")

    return results


def _normalize_key(*, key: str) -> str:
    """
    Normalize key

    Args:
        key: Key to normalize

    Returns:
        Normalized string
    """
    key = "_".join(key.split(sep=" "))
    key = key.lower()
    return key


def _normalize_keys(*, data: Dict) -> Dict:
    """
    Normalize keys in dictionary

    Args:
        data: Dictionary to normalize

    Returns:
        Normalized dictionary
    """
    keys = {_normalize_key(key=key): value for key, value in data.items()}
    return keys


def _generate_data_from_csv(*, file: Union[pathlib.Path, str]) -> List[Dict]:
    """
    Generate result rows from CSV file

    Args:
        file: CSV file path

    Returns:
        List of rows
    """
    LOGGER.debug(f"Reading data from {file}")
    filtered_rows: List[Dict] = []
    with open(file, "r") as csvfile:
        reader = csv.DictReader(csvfile)
        for r in reader:
            r = _normalize_keys(data=r)
            filtered_row = {k: v for k, v in r.items()}
            filtered_rows.append(filtered_row)

    LOGGER.debug("done")

    return filtered_rows


def _generate_data_from_json(file: pathlib.Path) -> List[Dict]:
    LOGGER.info(f"Reading data from {file}")
    filtered_rows: List[Dict] = list()
    with open(file, "r") as json_file:
        file_data = json.load(json_file)
    if not isinstance(file_data, list):
        file_data = [file_data]

    for r in file_data:
        r = _normalize_keys(data=r)
        filtered_row = {k: v for k, v in r.items()}
        filtered_rows.append(filtered_row)

    LOGGER.info("done")

    return filtered_rows


def _generate_data_from_yaml(file: pathlib.Path) -> List[Dict]:
    LOGGER.info(f"Reading data from {file}")
    filtered_rows: List[Dict] = list()
    with open(file, "r") as yaml_file:
        file_data = yaml.safe_load(yaml_file)
    if not isinstance(file_data, list):
        file_data = [file_data]

    for r in file_data:
        r = _normalize_keys(data=r)
        filtered_row = {k: v for k, v in r.items()}
        filtered_rows.append(filtered_row)

    LOGGER.info("done")

    return filtered_rows
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/summary.py
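A hypothetical sketch of collecting result rows and writing a summary CSV; the paths and parameters are made up, and the input CSV would normally be produced by the performance stages.

# Hypothetical sketch; paths and parameters are placeholders.
import pathlib

from runner.summary import load_results, save_summary

rows = load_results(
    results_path="results/triton_performance_offline.csv",
    result_type="triton_performance_offline",
    parameters={"precision": "fp16", "batch": 16},  # "batch" is dropped before merging into rows
)
save_summary(
    result_type="triton_performance_offline",
    results=rows,
    summary_dir=pathlib.Path("results/summaries"),
)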
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import subprocess
from enum import Enum
from typing import Any, List, Optional

# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .core import Command
from .exceptions import RunnerException
from .logger import LOGGER


def format_env_key(s: str):
    """
    Format environment variable key

    Args:
        s: String to format

    Returns:
        Upper cased string
    """
    return s.upper()


def format_env_value(value: Any) -> str:
    """
    Format environment variable value

    Args:
        value: value to be formatted

    Returns:
        Formatted value as a string
    """
    value = value if not isinstance(value, Enum) else value.value
    value = value if type(value) not in [list, tuple] else ",".join(map(str, value))
    value = str(value)
    return value


def get_result_path(result_path: str) -> str:
    """
    Map result path when different variants are passed, e.g. with an env variable in the path

    Args:
        result_path: Path to result file

    Returns:
        str
    """
    for env_var, val in os.environ.items():
        result_path = result_path.replace(f"${{{env_var}}}", val)

    if result_path.startswith("/"):
        return result_path

    if result_path.startswith("./"):
        result_path = result_path[2:]

    return result_path


def clean_directory(directory: pathlib.Path) -> None:
    """
    Remove all files and directories from directory

    Args:
        directory: Path to directory which should be cleaned

    Returns:
        None
    """
    LOGGER.debug(f"Cleaning {directory.as_posix()}")
    if not directory.is_dir():
        LOGGER.warning(f"{directory.name} is not a directory.")
        return

    for item in os.listdir(directory):
        item_path = directory / item
        if item_path.is_dir():
            LOGGER.debug(f"Remove dir {item_path.as_posix()}")
            shutil.rmtree(item_path.as_posix())
        elif item_path.is_file():
            LOGGER.debug(f"Remove file: {item_path.as_posix()}")
            item_path.unlink()
        else:
            LOGGER.warning(f"Cannot remove item {item_path.name}. Not a file or directory.")


def exec_command(command: Command) -> None:
    """
    Execute command

    Args:
        command: Command to run
    """
    try:
        process = subprocess.Popen(
            [str(command)],
            shell=True,
            start_new_session=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            encoding="utf-8",
        )
        while True:
            output = process.stdout.readline()
            if output == "" and process.poll() is not None:
                break

            if output:
                print(output.rstrip())
                LOGGER.write(output)

        result = process.poll()
        if result != 0:
            raise RunnerException(f"Command {command} failed with exit status: {result}")
    except subprocess.CalledProcessError as e:
        raise RunnerException(f"Running command {e.cmd} failed with exit status {e.returncode} : {e.output}")
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/utils.py
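A few hypothetical calls showing what the helper functions return; the environment variable is set here only for the example.

# Hypothetical sketch; values and the SHARED_DIR variable are placeholders.
import os

from runner.core import Framework
from runner.utils import format_env_key, format_env_value, get_result_path

print(format_env_key("precision"))          # "PRECISION"
print(format_env_value(Framework.PyTorch))  # "PyTorch"
print(format_env_value([1, 2, 4]))          # "1,2,4"

os.environ["SHARED_DIR"] = "/workspace/shared"
print(get_result_path("${SHARED_DIR}/perf_offline.csv"))  # "/workspace/shared/perf_offline.csv"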
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, Tuple

# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .stages import (
    ConversionStage,
    DeployStage,
    ExportStage,
    ResultsType,
    TritonPerformanceOfflineStage,
    TritonPerformanceOnlineStage,
    TritonPreparePerformanceProfilingDataStage,
)


class Pipeline:
    """
    Definition of stages that have to be executed before and during experiments
    """

    # Stages to execute as part of single experiment
    _experiment_stages = [
        ExportStage.label,
        ConversionStage.label,
        DeployStage.label,
        TritonPreparePerformanceProfilingDataStage.label,
        TritonPerformanceOfflineStage.label,
        TritonPerformanceOnlineStage.label,
    ]

    def __init__(self):
        """
        Initialize pipeline
        """
        self._stages: Dict = dict()

    def model_export(self, commands: Tuple[str, ...]) -> None:
        """
        Model export stage

        Args:
            commands: Commands to be executed as part of stage

        Returns:
            None
        """
        stage = ExportStage(commands=commands)
        self._stages[stage.label] = stage

    def model_conversion(self, commands: Tuple[str, ...]) -> None:
        """
        Model conversion stage

        Args:
            commands: Commands to be executed as part of stage

        Returns:
            None
        """
        stage = ConversionStage(commands=commands)
        self._stages[stage.label] = stage

    def model_deploy(self, commands: Tuple[str, ...]) -> None:
        """
        Model deployment stage

        Args:
            commands: Commands to be executed as part of stage

        Returns:
            None
        """
        stage = DeployStage(commands=commands)
        self._stages[stage.label] = stage

    def triton_prepare_performance_profiling_data(self, commands: Tuple[str, ...]) -> None:
        """
        Model profiling data creation stage

        Args:
            commands: Commands to be executed as part of stage

        Returns:
            None
        """
        stage = TritonPreparePerformanceProfilingDataStage(commands=commands)
        self._stages[stage.label] = stage

    def triton_performance_offline_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
        """
        Model performance offline test stage

        Args:
            commands: Commands to be executed as part of stage
            result_path: Path where results file is stored

        Returns:
            None
        """
        stage = TritonPerformanceOfflineStage(
            commands=commands,
            result_path=result_path,
            result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
        )
        self._stages[stage.label] = stage

    def triton_performance_online_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
        """
        Model performance online test stage

        Args:
            commands: Commands to be executed as part of stage
            result_path: Path where results file is stored

        Returns:
            None
        """
        stage = TritonPerformanceOnlineStage(
            commands=commands,
            result_path=result_path,
            result_type=ResultsType.TRITON_PERFORMANCE_ONLINE,
        )
        self._stages[stage.label] = stage

    def stages(self):
        """
        Generate stages which should be run per experiment

        Returns:
            Generator with stages object
        """
        for stage_name in self._experiment_stages:
            stage = self._stages.get(stage_name)
            if not stage:
                continue

            yield stage
DeepLearningExamples-master
PyTorch/LanguageModeling/BERT/triton/runner/pipeline.py
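A hypothetical end-to-end sketch tying the pieces together: build a Pipeline, export each registered stage to a Bash script, and execute it. Commands and directories are placeholders.

# Hypothetical sketch; commands and directories are placeholders.
import pathlib

from runner.exporter import CommandsExporter
from runner.pipeline import Pipeline
from runner.utils import exec_command

pipeline = Pipeline()
pipeline.model_export(commands=("echo 'export model here'\n",))
pipeline.model_deploy(commands=("echo 'deploy model here'\n",))

scripts_dir = pathlib.Path("scripts")
scripts_dir.mkdir(parents=True, exist_ok=True)
exporter = CommandsExporter(scripts_dir=scripts_dir)

for stage in pipeline.stages():  # yields registered stages in the fixed experiment order
    exec_command(exporter.export(stage=stage))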